In [1]:
%matplotlib inline
import pandas as pd 
import numpy as np
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.multicomp import MultiComparison
from statsmodels.formula.api import ols
from scipy import stats
In [2]:
# Raw playlist export: semicolon-separated, Latin-1 encoded (non-ASCII artist names).
data = pd.read_csv("playlists.csv", sep=";", encoding = "ISO-8859-1") 
In [3]:
# Overview of all 64 columns, numeric and categorical alike (11-row summary below).
data.describe(include="all")
Out[3]:
company playlist_sample namesfiles no artist song sampleratefiles totalsamplesfiles durationfiles bitratefiles ... chromagramfiles_5 chromagramfiles_6 chromagramfiles_7 chromagramfiles_8 chromagramfiles_9 chromagramfiles_10 chromagramfiles_11 chromagramfiles_12 attackslopefiles attackleapfiles
count 1782 1782.000000 1782 1782.000000 1782 1782 1782.0 1.782000e+03 1782.000000 1782.000000 ... 1782.000000 1782.000000 1782.000000 1782.000000 1782.000000 1782.000000 1782.000000 1782.000000 1782.000000 1782.000000
unique 6 NaN 515 NaN 353 443 NaN NaN NaN NaN ... NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
top Arte Francés NaN 30 - Kasbo - Your Tempo.mp3 ... NaN Satin Jackets Hula Hoop.mp3 ... NaN NaN NaN NaN ... NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
freq 441 NaN 6 NaN 51 12 NaN NaN NaN NaN ... NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
mean NaN 2.084175 NaN 17.116162 NaN NaN 44100.0 1.043632e+07 236.651237 252.336700 ... 0.332301 0.319191 0.265246 0.440462 0.549565 0.581967 0.477825 0.430522 15.804409 0.507503
std NaN 1.114796 NaN 11.837401 NaN NaN 0.0 3.227105e+06 73.176981 88.377597 ... 0.270616 0.263919 0.249612 0.290454 0.314771 0.323173 0.321646 0.295563 9.338659 0.247587
min NaN 1.000000 NaN 1.000000 NaN NaN 44100.0 5.965054e+06 135.262000 128.000000 ... 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.168304
25% NaN 1.000000 NaN 8.000000 NaN NaN 44100.0 8.353151e+06 189.413850 128.000000 ... 0.116591 0.109123 0.075378 0.204738 0.301961 0.316822 0.214680 0.203097 9.810711 0.285584
50% NaN 2.000000 NaN 15.500000 NaN NaN 44100.0 9.480378e+06 214.974562 320.000000 ... 0.262799 0.259887 0.183082 0.396861 0.520957 0.570088 0.418077 0.379737 14.833864 0.452456
75% NaN 3.000000 NaN 24.000000 NaN NaN 44100.0 1.146931e+07 260.075075 320.000000 ... 0.494897 0.469603 0.384485 0.642814 0.829318 0.918554 0.735384 0.616558 19.964413 0.730669
max NaN 5.000000 NaN 65.000000 NaN NaN 44100.0 2.843136e+07 644.702000 320.000000 ... 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 66.233620 0.999408

11 rows × 64 columns

Find the positive and negative songs of the selection process for every company.

In [4]:
# Split each company's songs into "positives" (songs that survived into the
# final playlist sample) and "negatives" (songs that appeared in earlier
# playlists but were dropped before the last one).
companies = data['company'].unique()
by_company = [data[data.company == company] for company in companies]
positives = []
negatives = []
for data_com in by_company:
    data_com = data_com.sort_values('playlist_sample')
    # Highest playlist number for this company = the final playlist.
    # (int(Series) is deprecated; index the scalar explicitly.)
    last_pl = int(data_com['playlist_sample'].iloc[-1])
    df_last_pl = data_com[data_com.playlist_sample == last_pl]
    positives.append(df_last_pl)
    # (artist, song) pairs present in the final playlist, for O(1) membership tests.
    final_songs = set(zip(df_last_pl['artist'], df_last_pl['song']))
    neg_rows = []    # collected rows of dropped songs
    seen_count = {}  # (artist, song) -> copies collected so far (capped at 3)
    rep = 0          # rows skipped: song reached the final playlist, or cap hit
    for index, row in data_com[data_com.playlist_sample < last_pl].iterrows():
        key = (row['artist'], row['song'])
        # Keep the row only if the song did NOT reach the final playlist and
        # we hold fewer than 3 copies of it (songs repeat across samples).
        if key not in final_songs and seen_count.get(key, 0) < 3:
            neg_rows.append(row)
            seen_count[key] = seen_count.get(key, 0) + 1
        else:
            rep += 1
    # DataFrame.append is deprecated (removed in pandas 2.0); build once instead.
    if neg_rows:
        neg_df = pd.DataFrame(neg_rows).reset_index(drop=True)
    else:
        neg_df = pd.DataFrame(columns=data_com.columns)
    negatives.append(neg_df)
In [5]:
# Label each row (0 = dropped/"negative", 1 = kept/"positive") and stack the
# two groups into one frame per company.
df_n_ps = []
for i in range(len(negatives)):
    # .copy() avoids the SettingWithCopyWarning: positives holds slices of `data`.
    neg = negatives[i].copy()
    pos = positives[i].copy()
    neg['chosen'] = 0
    pos['chosen'] = 1
    # DataFrame.append is deprecated (removed in pandas 2.0); use pd.concat.
    df_n_ps.append(pd.concat([neg, pos]))
D:\Usuarios\1144084318\AppData\Roaming\Python\Python37\site-packages\ipykernel_launcher.py:4: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
  after removing the cwd from sys.path.

ANOVA

In [6]:
import warnings
import matplotlib.pyplot as plt
import math
import seaborn as sns
# NOTE(review): blanket suppression hides real issues (e.g. the
# SettingWithCopyWarning above); prefer filtering specific categories.
warnings.filterwarnings('ignore')
In [27]:
# For every feature column, run Tukey HSD between negatives (chosen=0) and
# positives (chosen=1); where the difference is significant, validate the
# ANOVA assumptions (Levene homogeneity + Shapiro normality of residuals)
# and plot the two class densities side by side.
alpha = 0.05
for df_n_p in df_n_ps:
    df_n_p = df_n_p.fillna(0)
    fig = plt.figure(figsize=(17, 200))
    i = 1
    # Feature columns start at position 10; the last column is the 'chosen' label.
    for index in range(10, df_n_p.shape[1] - 1):
        # Fixed: `name` was printed BEFORE being assigned (NameError on a fresh
        # kernel; with stale state it printed the previous column's name).
        name = df_n_p.columns.values[index]
        print(name)
        df_n_p[name] = df_n_p[name].astype('float64')
        mc = MultiComparison(df_n_p[name], df_n_p['chosen'])
        try:
            mc_results = mc.tukeyhsd()
        except (TypeError, ValueError):
            # tukeyhsd raises on degenerate columns (see the TypeError traceback
            # this cell produced on 'centroidfiles'); skip them instead of dying.
            continue
        if mc_results._results_table.data[1:][0][5]:
            results = ols(name + ' ~ C(chosen)', data=df_n_p).fit()
            homogeneity_test = stats.levene(df_n_p[name][df_n_p['chosen'] == 0],
                                            df_n_p[name][df_n_p['chosen'] == 1])[1]
            normality_test = stats.shapiro(results.resid)[1]
            if homogeneity_test > alpha and normality_test > alpha:
                # Fixed precedence bug: intent is ceil((n_cols - 9) / 2) subplot
                # rows, not n_cols - ceil(9/2).
                ax = fig.add_subplot(math.ceil((df_n_p.shape[1] - 9) / 2), 2, i)
                sns.kdeplot(df_n_p.loc[df_n_p.chosen == 0][name], shade=True, ax=ax)
                sns.kdeplot(df_n_p.loc[df_n_p.chosen == 1][name], shade=True, ax=ax)
                plt.title(df_n_p.iloc[0, 0].upper() + " " + name)
                plt.legend(['neg', 'pos'])
                i += 1
kurtosisfiles
rmsfiles
rmsmedianfiles
lowenergyfiles
ASRfiles
beatspectrumfiles
eventdensityfiles
tempofiles
pulseclarityfiles
zerocrossfiles
rolloffsfiles
brightnessfiles
spreadfiles
centroidfiles
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-27-4116f4441a3f> in <module>
      9         df_n_p[name]=df_n_p[name].astype('float64')
     10         mc = MultiComparison(df_n_p[name], df_n_p['chosen'])
---> 11         mc_results = mc.tukeyhsd()
     12         if mc_results._results_table.data[1:][0][5]:
     13             # print(name)

~\AppData\Roaming\Python\Python37\site-packages\statsmodels\sandbox\stats\multicomp.py in tukeyhsd(self, alpha)
   1011                                np.round(res[4][:, 0], 4),
   1012                                np.round(res[4][:, 1], 4),
-> 1013                                res[1]),
   1014                           dtype=[('group1', object),
   1015                                  ('group2', object),

~\AppData\Roaming\Python\Python37\site-packages\statsmodels\compat\python.py in lzip(*args, **kwargs)
     60 
     61     def lzip(*args, **kwargs):
---> 62         return list(zip(*args, **kwargs))
     63 
     64     def lmap(*args, **kwargs):

TypeError: zip argument #4 must support iteration
In [7]:
from collections import Counter


from sklearn.cluster import KMeans
from sklearn.metrics import confusion_matrix, accuracy_score, silhouette_samples, silhouette_score
from sklearn import preprocessing
from sklearn.decomposition import PCA

# sklearn renamed the misspelled `calinski_harabaz_score` to
# `calinski_harabasz_score` in 0.20 and removed the old name in 0.23.
# Import whichever exists and expose both spellings for compatibility.
try:
    from sklearn.metrics import calinski_harabasz_score
except ImportError:
    from sklearn.metrics import calinski_harabaz_score as calinski_harabasz_score
calinski_harabaz_score = calinski_harabasz_score
In [8]:
# Cast the three remaining object-typed numeric columns to float for every company.
for idx in range(len(companies)):
    df_n_ps[idx] = df_n_ps[idx].astype({
        'bitratefiles': 'float64',
        'pitchfiles': 'float64',
        'bestkeyfiles': 'float64',
    })
df_n_ps[0].info()
<class 'pandas.core.frame.DataFrame'>
Int64Index: 315 entries, 0 to 179
Data columns (total 65 columns):
company                 315 non-null object
playlist_sample         315 non-null object
namesfiles              315 non-null object
no                      315 non-null object
artist                  315 non-null object
song                    315 non-null object
sampleratefiles         315 non-null object
totalsamplesfiles       315 non-null object
durationfiles           315 non-null float64
bitratefiles            315 non-null float64
rmsfiles                315 non-null float64
rmsmedianfiles          315 non-null float64
lowenergyfiles          315 non-null float64
ASRfiles                315 non-null float64
beatspectrumfiles       315 non-null float64
eventdensityfiles       315 non-null float64
tempofiles              315 non-null float64
pulseclarityfiles       315 non-null float64
zerocrossfiles          315 non-null float64
rolloffsfiles           315 non-null float64
brightnessfiles         315 non-null float64
spreadfiles             315 non-null float64
centroidfiles           314 non-null float64
kurtosisfiles           315 non-null float64
flatnessfiles           315 non-null float64
entropyfiles            315 non-null float64
mfccfiles_1             315 non-null float64
mfccfiles_2             315 non-null float64
mfccfiles_3             315 non-null float64
mfccfiles_4             315 non-null float64
mfccfiles_5             315 non-null float64
mfccfiles_6             315 non-null float64
mfccfiles_7             315 non-null float64
mfccfiles_8             315 non-null float64
mfccfiles_9             315 non-null float64
mfccfiles_10            315 non-null float64
mfccfiles_11            315 non-null float64
mfccfiles_12            315 non-null float64
mfccfiles_13            315 non-null float64
pitchfiles              315 non-null float64
inharmonicityfiles      315 non-null float64
bestkeyfiles            315 non-null float64
keyclarityfiles         315 non-null float64
modalityfiles           315 non-null float64
tonalcentroidfiles_1    315 non-null float64
tonalcentroidfiles_2    315 non-null float64
tonalcentroidfiles_3    315 non-null float64
tonalcentroidfiles_4    315 non-null float64
tonalcentroidfiles_5    315 non-null float64
tonalcentroidfiles_6    315 non-null float64
chromagramfiles_1       315 non-null float64
chromagramfiles_2       315 non-null float64
chromagramfiles_3       315 non-null float64
chromagramfiles_4       315 non-null float64
chromagramfiles_5       315 non-null float64
chromagramfiles_6       315 non-null float64
chromagramfiles_7       315 non-null float64
chromagramfiles_8       315 non-null float64
chromagramfiles_9       315 non-null float64
chromagramfiles_10      315 non-null float64
chromagramfiles_11      315 non-null float64
chromagramfiles_12      315 non-null float64
attackslopefiles        315 non-null float64
attackleapfiles         315 non-null float64
chosen                  315 non-null int64
dtypes: float64(56), int64(1), object(8)
memory usage: 162.4+ KB

Vamos a reemplazar los NaN y luego normalizar los datos para que todas las variables tengan la misma importancia. Solo vamos a considerar los datos numéricos.

In [9]:
# Replace NaNs with 0, then z-score every numeric column (from position 8 on)
# so all features carry equal weight in the analyses below.
df_n_ps_std = [None] * len(companies)
for idx in range(len(companies)):
    df_n_ps[idx] = df_n_ps[idx].fillna(0)
    scaled = preprocessing.scale(df_n_ps[idx].iloc[:, 8:])
    df_n_ps_std[idx] = pd.DataFrame(scaled, columns=df_n_ps[idx].columns[8:])
# Sanity check: standardized columns should have mean ~0.
df_n_ps_std[0].mean(axis=0)
Out[9]:
durationfiles          -1.889141e-16
bitratefiles            0.000000e+00
rmsfiles                3.559763e-16
rmsmedianfiles         -2.396672e-16
lowenergyfiles          1.543739e-16
ASRfiles                1.226532e-16
beatspectrumfiles       2.269789e-16
eventdensityfiles      -6.132661e-17
tempofiles              4.103860e-16
pulseclarityfiles      -6.696583e-17
zerocrossfiles         -1.092600e-16
rolloffsfiles           2.661011e-16
brightnessfiles         1.092600e-16
spreadfiles             1.519067e-16
centroidfiles           1.501444e-16
kurtosisfiles           1.875043e-16
flatnessfiles          -4.017950e-17
entropyfiles            6.012827e-16
mfccfiles_1            -4.398598e-16
mfccfiles_2            -2.326182e-17
mfccfiles_3             6.308886e-17
mfccfiles_4             1.718202e-17
mfccfiles_5             1.832749e-17
mfccfiles_6            -3.172066e-17
mfccfiles_7            -1.400996e-16
mfccfiles_8             5.110550e-17
mfccfiles_9             3.101575e-17
mfccfiles_10           -6.485112e-17
mfccfiles_11           -4.229421e-18
mfccfiles_12           -1.304071e-17
mfccfiles_13           -1.233581e-17
pitchfiles              0.000000e+00
inharmonicityfiles     -1.009422e-15
bestkeyfiles            2.424868e-16
keyclarityfiles        -3.972131e-16
modalityfiles          -3.771234e-17
tonalcentroidfiles_1   -1.517745e-17
tonalcentroidfiles_2   -5.921189e-17
tonalcentroidfiles_3    2.326182e-17
tonalcentroidfiles_4    2.502407e-17
tonalcentroidfiles_5    3.260179e-17
tonalcentroidfiles_6   -2.361427e-17
chromagramfiles_1       6.414622e-17
chromagramfiles_2      -2.061843e-17
chromagramfiles_3      -3.489272e-17
chromagramfiles_4      -1.755210e-16
chromagramfiles_5       1.797504e-17
chromagramfiles_6      -3.101575e-17
chromagramfiles_7      -8.776049e-17
chromagramfiles_8       7.471977e-17
chromagramfiles_9      -4.194176e-17
chromagramfiles_10      3.630253e-17
chromagramfiles_11      5.057683e-17
chromagramfiles_12     -5.894756e-17
attackslopefiles       -6.626093e-17
attackleapfiles        -1.423905e-16
chosen                  7.218212e-16
dtype: float64
In [10]:
# Sanity check: after scaling each column's std should be ~1 (0 for constant columns).
df_n_ps_std[0].std(axis=0)
Out[10]:
durationfiles           1.001591
bitratefiles            0.000000
rmsfiles                1.001591
rmsmedianfiles          1.001591
lowenergyfiles          1.001591
ASRfiles                1.001591
beatspectrumfiles       1.001591
eventdensityfiles       1.001591
tempofiles              1.001591
pulseclarityfiles       1.001591
zerocrossfiles          1.001591
rolloffsfiles           1.001591
brightnessfiles         1.001591
spreadfiles             1.001591
centroidfiles           1.001591
kurtosisfiles           1.001591
flatnessfiles           1.001591
entropyfiles            1.001591
mfccfiles_1             1.001591
mfccfiles_2             1.001591
mfccfiles_3             1.001591
mfccfiles_4             1.001591
mfccfiles_5             1.001591
mfccfiles_6             1.001591
mfccfiles_7             1.001591
mfccfiles_8             1.001591
mfccfiles_9             1.001591
mfccfiles_10            1.001591
mfccfiles_11            1.001591
mfccfiles_12            1.001591
mfccfiles_13            1.001591
pitchfiles              0.000000
inharmonicityfiles      1.001591
bestkeyfiles            1.001591
keyclarityfiles         1.001591
modalityfiles           1.001591
tonalcentroidfiles_1    1.001591
tonalcentroidfiles_2    1.001591
tonalcentroidfiles_3    1.001591
tonalcentroidfiles_4    1.001591
tonalcentroidfiles_5    1.001591
tonalcentroidfiles_6    1.001591
chromagramfiles_1       1.001591
chromagramfiles_2       1.001591
chromagramfiles_3       1.001591
chromagramfiles_4       1.001591
chromagramfiles_5       1.001591
chromagramfiles_6       1.001591
chromagramfiles_7       1.001591
chromagramfiles_8       1.001591
chromagramfiles_9       1.001591
chromagramfiles_10      1.001591
chromagramfiles_11      1.001591
chromagramfiles_12      1.001591
attackslopefiles        1.001591
attackleapfiles         1.001591
chosen                  1.001591
dtype: float64

Borramos pitch y bitrate porque son columnas constantes (desviación estándar 0) y, tras la estandarización, todos sus valores quedan en 0.

In [11]:
# Remove the two constant columns (zero variance, all-zero after scaling).
for idx in range(len(companies)):
    df_n_ps_std[idx] = df_n_ps_std[idx].drop(columns=["pitchfiles", "bitratefiles"])
In [12]:
# Confirm the two constant columns were dropped.
df_n_ps_std[0].columns
Out[12]:
Index(['durationfiles', 'rmsfiles', 'rmsmedianfiles', 'lowenergyfiles',
       'ASRfiles', 'beatspectrumfiles', 'eventdensityfiles', 'tempofiles',
       'pulseclarityfiles', 'zerocrossfiles', 'rolloffsfiles',
       'brightnessfiles', 'spreadfiles', 'centroidfiles', 'kurtosisfiles',
       'flatnessfiles', 'entropyfiles', 'mfccfiles_1', 'mfccfiles_2',
       'mfccfiles_3', 'mfccfiles_4', 'mfccfiles_5', 'mfccfiles_6',
       'mfccfiles_7', 'mfccfiles_8', 'mfccfiles_9', 'mfccfiles_10',
       'mfccfiles_11', 'mfccfiles_12', 'mfccfiles_13', 'inharmonicityfiles',
       'bestkeyfiles', 'keyclarityfiles', 'modalityfiles',
       'tonalcentroidfiles_1', 'tonalcentroidfiles_2', 'tonalcentroidfiles_3',
       'tonalcentroidfiles_4', 'tonalcentroidfiles_5', 'tonalcentroidfiles_6',
       'chromagramfiles_1', 'chromagramfiles_2', 'chromagramfiles_3',
       'chromagramfiles_4', 'chromagramfiles_5', 'chromagramfiles_6',
       'chromagramfiles_7', 'chromagramfiles_8', 'chromagramfiles_9',
       'chromagramfiles_10', 'chromagramfiles_11', 'chromagramfiles_12',
       'attackslopefiles', 'attackleapfiles', 'chosen'],
      dtype='object')

MFCC

In [71]:
# The 13 MFCC feature columns occupy positions 17..29 after the drops above.
df_n_ps_std[0].columns[17:30]
Out[71]:
Index(['mfccfiles_1', 'mfccfiles_2', 'mfccfiles_3', 'mfccfiles_4',
       'mfccfiles_5', 'mfccfiles_6', 'mfccfiles_7', 'mfccfiles_8',
       'mfccfiles_9', 'mfccfiles_10', 'mfccfiles_11', 'mfccfiles_12',
       'mfccfiles_13'],
      dtype='object')
In [72]:
# Keep only the 13 standardized MFCC columns (positions 17..29) per company.
df_n_ps_std_mfcc = [None] * len(companies)
for idx in range(len(companies)):
    # .copy() decouples the MFCC frame from the full standardized frame; the
    # slice already carries the right column names, no reassignment needed.
    df_n_ps_std_mfcc[idx] = df_n_ps_std[idx].iloc[:, 17:30].copy()
df_n_ps_std_mfcc[0].info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 315 entries, 0 to 314
Data columns (total 13 columns):
mfccfiles_1     315 non-null float64
mfccfiles_2     315 non-null float64
mfccfiles_3     315 non-null float64
mfccfiles_4     315 non-null float64
mfccfiles_5     315 non-null float64
mfccfiles_6     315 non-null float64
mfccfiles_7     315 non-null float64
mfccfiles_8     315 non-null float64
mfccfiles_9     315 non-null float64
mfccfiles_10    315 non-null float64
mfccfiles_11    315 non-null float64
mfccfiles_12    315 non-null float64
mfccfiles_13    315 non-null float64
dtypes: float64(13)
memory usage: 32.1 KB

Arte Francés

ANN

In [23]:
import keras
keras.__version__
Using TensorFlow backend.
Out[23]:
'2.3.0'
In [24]:
from keras.layers import Input, Flatten, Dense#, Lambda
from keras.models import Model
from keras import layers
from keras import models, optimizers

from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import GridSearchCV #permite buscar la mejor configuración de parámetros con C-V
from sklearn.metrics import make_scorer # permite crear una clase scorer a partir de una función de score (necesario para el kappa)
from sklearn.metrics import accuracy_score, cohen_kappa_score, classification_report, roc_auc_score
from sklearn.model_selection import train_test_split #metodo de particionamiento de datasets para evaluación
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
In [108]:
# Features: standardized MFCC coefficients of the first company (Arte Francés).
X = df_n_ps_std_mfcc[0]
In [109]:
# Target: 1 = song kept in the final playlist, 0 = dropped.
y = df_n_ps[0]['chosen']
In [110]:
# Fixed seed so the split (and every metric below) is reproducible across
# kernel restarts; 1234 matches the seed used for the grid search.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1234)
In [111]:
# NOTE(review): the recorded output below shows (236, 14) while the MFCC frame
# has 13 columns — likely stale kernel state; confirm after a fresh run.
X_train.shape
Out[111]:
(236, 14)
In [382]:
# Base estimator for the grid search; this hidden_layer_sizes value is only a
# placeholder — the parameter grid below overrides it.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [383]:
# Hyperparameter candidates for the grid search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
# Defined but excluded from the grid below to keep the search tractable.
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [384]:
import time
start = time.time()  # wall-clock start, used to report total tuning time below

np.random.seed(1234)  # reproducible MLP weight initialization / shuffling
parametros = {'activation': activation_vec,
              'max_iter': max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Two scorers; refit on accuracy so grid.best_* refers to the accuracy winner.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# `iid` was deprecated in scikit-learn 0.22 and removed in 0.24; dropping it
# uses the unweighted mean CV score, the modern default.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1)
In [57]:
# Run the exhaustive search (~1 hour, see timing below) and report the winner.
grid.fit(X_train, y_train)

print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # time right after the model search finished
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'logistic', 'hidden_layer_sizes': (30,), 'learning_rate_init': 0.003, 'max_iter': 2000}, que permiten obtener un Accuracy de 81.78% y un Kappa del 50.99
Tiempo total: 61.22 minutos
In [31]:
# The (61-minute) search found activation='logistic'; Keras names the same
# function 'sigmoid', so the winning configuration is hard-coded here with
# the translated activation instead of re-running the search.
grid.best_params_={'activation': 'sigmoid', 'hidden_layer_sizes': (30,), 'learning_rate_init': 0.003, 'max_iter': 2000}
n0 = X_train.shape[1]  # input dimensionality for the Keras model
# Hidden layer widths from the best configuration, plus one output unit.
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)
lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [32]:
# Input layer sized to the number of MFCC features.
input_tensor = Input(shape = (n0,))
In [33]:
# Chain the hidden Dense layers: each consumes the previous layer's output.
hidden_outputs = [input_tensor]
for width in ns[:-1]:
    hidden_layer = Dense(width, activation=grid.best_params_['activation'])
    hidden_outputs.append(hidden_layer(hidden_outputs[-1]))

# Single sigmoid unit for binary (kept/dropped) classification.
classification_output = Dense(ns[-1], activation='sigmoid')(hidden_outputs[-1])
In [34]:
# Build the model and snapshot the freshly initialized weights, so training
# can later be restarted from this exact starting point.
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [35]:
# Architecture overview: 13 inputs -> 30 sigmoid units -> 1 sigmoid output.
model.summary()
Model: "model_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_2 (InputLayer)         (None, 13)                0         
_________________________________________________________________
dense_2 (Dense)              (None, 30)                420       
_________________________________________________________________
dense_3 (Dense)              (None, 1)                 31        
=================================================================
Total params: 451
Trainable params: 451
Non-trainable params: 0
_________________________________________________________________
In [36]:
# Restore the initial weights so re-running this cell restarts training from
# the same starting point instead of continuing a previous fit.
model.set_weights(weights)
# `lr` is deprecated since Keras 2.3 in favor of `learning_rate`.
adam = keras.optimizers.Adam(learning_rate=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test),
            callbacks=[
                # Halve the learning rate when val accuracy plateaus for 10 epochs.
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 236 samples, validate on 79 samples
Epoch 1/2000
236/236 [==============================] - 4s 17ms/step - loss: 0.8403 - accuracy: 0.3220 - val_loss: 0.7849 - val_accuracy: 0.2911
Epoch 2/2000
236/236 [==============================] - 0s 89us/step - loss: 0.7231 - accuracy: 0.4110 - val_loss: 0.6686 - val_accuracy: 0.5823
Epoch 3/2000
236/236 [==============================] - 0s 97us/step - loss: 0.6456 - accuracy: 0.6992 - val_loss: 0.5972 - val_accuracy: 0.7722
Epoch 4/2000
236/236 [==============================] - 0s 97us/step - loss: 0.6058 - accuracy: 0.6992 - val_loss: 0.5565 - val_accuracy: 0.7722
Epoch 5/2000
236/236 [==============================] - 0s 89us/step - loss: 0.5886 - accuracy: 0.7076 - val_loss: 0.5358 - val_accuracy: 0.7722
Epoch 6/2000
236/236 [==============================] - 0s 89us/step - loss: 0.5802 - accuracy: 0.7076 - val_loss: 0.5249 - val_accuracy: 0.7722
Epoch 7/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5748 - accuracy: 0.7076 - val_loss: 0.5191 - val_accuracy: 0.7722
Epoch 8/2000
236/236 [==============================] - 0s 85us/step - loss: 0.5701 - accuracy: 0.7076 - val_loss: 0.5152 - val_accuracy: 0.7722
Epoch 9/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5664 - accuracy: 0.7076 - val_loss: 0.5126 - val_accuracy: 0.7722
Epoch 10/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5613 - accuracy: 0.7119 - val_loss: 0.5099 - val_accuracy: 0.7848
Epoch 11/2000
236/236 [==============================] - 0s 89us/step - loss: 0.5575 - accuracy: 0.7119 - val_loss: 0.5071 - val_accuracy: 0.7848
Epoch 12/2000
236/236 [==============================] - 0s 80us/step - loss: 0.5544 - accuracy: 0.7119 - val_loss: 0.5053 - val_accuracy: 0.7848
Epoch 13/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5521 - accuracy: 0.7203 - val_loss: 0.5028 - val_accuracy: 0.7848
Epoch 14/2000
236/236 [==============================] - 0s 85us/step - loss: 0.5498 - accuracy: 0.7203 - val_loss: 0.4982 - val_accuracy: 0.7848
Epoch 15/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5466 - accuracy: 0.7161 - val_loss: 0.4979 - val_accuracy: 0.7848
Epoch 16/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5445 - accuracy: 0.7246 - val_loss: 0.4958 - val_accuracy: 0.7722
Epoch 17/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5428 - accuracy: 0.7288 - val_loss: 0.4940 - val_accuracy: 0.7722
Epoch 18/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5406 - accuracy: 0.7246 - val_loss: 0.4910 - val_accuracy: 0.7595
Epoch 19/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5381 - accuracy: 0.7288 - val_loss: 0.4885 - val_accuracy: 0.7595
Epoch 20/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5369 - accuracy: 0.7331 - val_loss: 0.4861 - val_accuracy: 0.7722

Epoch 00020: ReduceLROnPlateau reducing learning rate to 0.001500000013038516.
Epoch 21/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5354 - accuracy: 0.7331 - val_loss: 0.4850 - val_accuracy: 0.7722
Epoch 22/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5346 - accuracy: 0.7331 - val_loss: 0.4830 - val_accuracy: 0.7722
Epoch 23/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5346 - accuracy: 0.7288 - val_loss: 0.4812 - val_accuracy: 0.7722
Epoch 24/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5335 - accuracy: 0.7288 - val_loss: 0.4809 - val_accuracy: 0.7595
Epoch 25/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5323 - accuracy: 0.7288 - val_loss: 0.4811 - val_accuracy: 0.7595
Epoch 26/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5317 - accuracy: 0.7373 - val_loss: 0.4806 - val_accuracy: 0.7595
Epoch 27/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5314 - accuracy: 0.7331 - val_loss: 0.4805 - val_accuracy: 0.7595
Epoch 28/2000
236/236 [==============================] - 0s 144us/step - loss: 0.5303 - accuracy: 0.7331 - val_loss: 0.4800 - val_accuracy: 0.7595
Epoch 29/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5299 - accuracy: 0.7458 - val_loss: 0.4789 - val_accuracy: 0.7595
Epoch 30/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5293 - accuracy: 0.7500 - val_loss: 0.4795 - val_accuracy: 0.7595

Epoch 00030: ReduceLROnPlateau reducing learning rate to 0.000750000006519258.
Epoch 31/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5288 - accuracy: 0.7500 - val_loss: 0.4794 - val_accuracy: 0.7595
Epoch 32/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5283 - accuracy: 0.7500 - val_loss: 0.4787 - val_accuracy: 0.7595
Epoch 33/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5282 - accuracy: 0.7500 - val_loss: 0.4780 - val_accuracy: 0.7595
Epoch 34/2000
236/236 [==============================] - 0s 157us/step - loss: 0.5278 - accuracy: 0.7500 - val_loss: 0.4773 - val_accuracy: 0.7595
Epoch 35/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5276 - accuracy: 0.7500 - val_loss: 0.4765 - val_accuracy: 0.7595
Epoch 36/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5274 - accuracy: 0.7500 - val_loss: 0.4759 - val_accuracy: 0.7595
Epoch 37/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5271 - accuracy: 0.7500 - val_loss: 0.4756 - val_accuracy: 0.7595
Epoch 38/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5269 - accuracy: 0.7500 - val_loss: 0.4753 - val_accuracy: 0.7595
Epoch 39/2000
236/236 [==============================] - 0s 140us/step - loss: 0.5268 - accuracy: 0.7542 - val_loss: 0.4757 - val_accuracy: 0.7595
Epoch 40/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5265 - accuracy: 0.7542 - val_loss: 0.4755 - val_accuracy: 0.7595

Epoch 00040: ReduceLROnPlateau reducing learning rate to 0.000375000003259629.
Epoch 41/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5263 - accuracy: 0.7542 - val_loss: 0.4755 - val_accuracy: 0.7468
Epoch 42/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5261 - accuracy: 0.7500 - val_loss: 0.4755 - val_accuracy: 0.7468
Epoch 43/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5260 - accuracy: 0.7500 - val_loss: 0.4755 - val_accuracy: 0.7468
Epoch 44/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5259 - accuracy: 0.7500 - val_loss: 0.4753 - val_accuracy: 0.7468
Epoch 45/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5258 - accuracy: 0.7500 - val_loss: 0.4751 - val_accuracy: 0.7468
Epoch 46/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5256 - accuracy: 0.7500 - val_loss: 0.4749 - val_accuracy: 0.7468
Epoch 47/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5255 - accuracy: 0.7500 - val_loss: 0.4743 - val_accuracy: 0.7468
Epoch 48/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5254 - accuracy: 0.7500 - val_loss: 0.4740 - val_accuracy: 0.7468
Epoch 49/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5253 - accuracy: 0.7542 - val_loss: 0.4737 - val_accuracy: 0.7468
Epoch 50/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5252 - accuracy: 0.7542 - val_loss: 0.4735 - val_accuracy: 0.7468

Epoch 00050: ReduceLROnPlateau reducing learning rate to 0.0001875000016298145.
Epoch 51/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5250 - accuracy: 0.7542 - val_loss: 0.4734 - val_accuracy: 0.7468
Epoch 52/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5250 - accuracy: 0.7542 - val_loss: 0.4732 - val_accuracy: 0.7468
Epoch 53/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5249 - accuracy: 0.7542 - val_loss: 0.4731 - val_accuracy: 0.7468
Epoch 54/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5248 - accuracy: 0.7542 - val_loss: 0.4730 - val_accuracy: 0.7468
Epoch 55/2000
236/236 [==============================] - 0s 140us/step - loss: 0.5248 - accuracy: 0.7542 - val_loss: 0.4729 - val_accuracy: 0.7468
Epoch 56/2000
236/236 [==============================] - 0s 157us/step - loss: 0.5247 - accuracy: 0.7542 - val_loss: 0.4728 - val_accuracy: 0.7468
Epoch 57/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5247 - accuracy: 0.7500 - val_loss: 0.4728 - val_accuracy: 0.7468
Epoch 58/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5246 - accuracy: 0.7500 - val_loss: 0.4727 - val_accuracy: 0.7468
Epoch 59/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5245 - accuracy: 0.7500 - val_loss: 0.4726 - val_accuracy: 0.7468
Epoch 60/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5245 - accuracy: 0.7500 - val_loss: 0.4724 - val_accuracy: 0.7468

Epoch 00060: ReduceLROnPlateau reducing learning rate to 9.375000081490725e-05.
Epoch 61/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5244 - accuracy: 0.7500 - val_loss: 0.4723 - val_accuracy: 0.7468
Epoch 62/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5244 - accuracy: 0.7500 - val_loss: 0.4722 - val_accuracy: 0.7468
Epoch 63/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5244 - accuracy: 0.7500 - val_loss: 0.4722 - val_accuracy: 0.7468
Epoch 64/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5244 - accuracy: 0.7500 - val_loss: 0.4721 - val_accuracy: 0.7468
Epoch 65/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5243 - accuracy: 0.7500 - val_loss: 0.4721 - val_accuracy: 0.7468
Epoch 66/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5243 - accuracy: 0.7500 - val_loss: 0.4720 - val_accuracy: 0.7468
Epoch 67/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5243 - accuracy: 0.7500 - val_loss: 0.4720 - val_accuracy: 0.7468
Epoch 68/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5242 - accuracy: 0.7458 - val_loss: 0.4720 - val_accuracy: 0.7468
Epoch 69/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5242 - accuracy: 0.7458 - val_loss: 0.4720 - val_accuracy: 0.7468
Epoch 70/2000
236/236 [==============================] - 0s 161us/step - loss: 0.5242 - accuracy: 0.7458 - val_loss: 0.4719 - val_accuracy: 0.7468

Epoch 00070: ReduceLROnPlateau reducing learning rate to 4.6875000407453626e-05.
Epoch 71/2000
236/236 [==============================] - 0s 148us/step - loss: 0.5242 - accuracy: 0.7458 - val_loss: 0.4719 - val_accuracy: 0.7468
Epoch 72/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5241 - accuracy: 0.7458 - val_loss: 0.4720 - val_accuracy: 0.7468
Epoch 73/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5241 - accuracy: 0.7458 - val_loss: 0.4720 - val_accuracy: 0.7468
Epoch 74/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5241 - accuracy: 0.7458 - val_loss: 0.4720 - val_accuracy: 0.7468
Epoch 75/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5241 - accuracy: 0.7458 - val_loss: 0.4720 - val_accuracy: 0.7468
Epoch 76/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5241 - accuracy: 0.7458 - val_loss: 0.4720 - val_accuracy: 0.7468
Epoch 77/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5241 - accuracy: 0.7458 - val_loss: 0.4720 - val_accuracy: 0.7468
Epoch 78/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5241 - accuracy: 0.7458 - val_loss: 0.4719 - val_accuracy: 0.7468
Epoch 79/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5240 - accuracy: 0.7458 - val_loss: 0.4719 - val_accuracy: 0.7468
Epoch 80/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5240 - accuracy: 0.7458 - val_loss: 0.4719 - val_accuracy: 0.7468

Epoch 00080: ReduceLROnPlateau reducing learning rate to 2.3437500203726813e-05.
Epoch 81/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5240 - accuracy: 0.7458 - val_loss: 0.4719 - val_accuracy: 0.7468
Epoch 82/2000
236/236 [==============================] - 0s 152us/step - loss: 0.5240 - accuracy: 0.7458 - val_loss: 0.4719 - val_accuracy: 0.7468
Epoch 83/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5240 - accuracy: 0.7458 - val_loss: 0.4719 - val_accuracy: 0.7468
Epoch 84/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5240 - accuracy: 0.7458 - val_loss: 0.4719 - val_accuracy: 0.7468
Epoch 85/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5240 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 86/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5240 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 87/2000
236/236 [==============================] - 0s 148us/step - loss: 0.5240 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 88/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5240 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 89/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5240 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 90/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00090: ReduceLROnPlateau reducing learning rate to 1.1718750101863407e-05.
Epoch 91/2000
236/236 [==============================] - 0s 140us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 92/2000
236/236 [==============================] - 0s 144us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 93/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 94/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 95/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4719 - val_accuracy: 0.7468
Epoch 96/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 97/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 98/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 99/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 100/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00100: ReduceLROnPlateau reducing learning rate to 5.859375050931703e-06.
Epoch 101/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 102/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 103/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 104/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 105/2000
236/236 [==============================] - 0s 161us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 106/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 107/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 108/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 109/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 110/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00110: ReduceLROnPlateau reducing learning rate to 2.9296875254658516e-06.
Epoch 111/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 112/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 113/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 114/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 115/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 116/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 117/2000
236/236 [==============================] - ETA: 0s - loss: 0.4906 - accuracy: 0.81 - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 118/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 119/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 120/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00120: ReduceLROnPlateau reducing learning rate to 1.4648437627329258e-06.
Epoch 121/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 122/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 123/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 124/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 125/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 126/2000
236/236 [==============================] - 0s 191us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 127/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 128/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 129/2000
236/236 [==============================] - 0s 165us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 130/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00130: ReduceLROnPlateau reducing learning rate to 7.324218813664629e-07.
Epoch 131/2000
236/236 [==============================] - 0s 203us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 132/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 133/2000
236/236 [==============================] - 0s 157us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 134/2000
236/236 [==============================] - 0s 89us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 135/2000
236/236 [==============================] - 0s 220us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 136/2000
236/236 [==============================] - 0s 178us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 137/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 138/2000
236/236 [==============================] - 0s 148us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 139/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 140/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00140: ReduceLROnPlateau reducing learning rate to 3.6621094068323146e-07.
Epoch 141/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 142/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 143/2000
236/236 [==============================] - 0s 186us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 144/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 145/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 146/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 147/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 148/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 149/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 150/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00150: ReduceLROnPlateau reducing learning rate to 1.8310547034161573e-07.
Epoch 151/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 152/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 153/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 154/2000
236/236 [==============================] - 0s 144us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 155/2000
236/236 [==============================] - ETA: 0s - loss: 0.4318 - accuracy: 0.84 - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 156/2000
236/236 [==============================] - 0s 144us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 157/2000
236/236 [==============================] - 0s 140us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 158/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 159/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 160/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00160: ReduceLROnPlateau reducing learning rate to 9.155273517080786e-08.
Epoch 161/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 162/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 163/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 164/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 165/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 166/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 167/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 168/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 169/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 170/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00170: ReduceLROnPlateau reducing learning rate to 4.577636758540393e-08.
Epoch 171/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 172/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 173/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 174/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 175/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 176/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 177/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 178/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 179/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 180/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00180: ReduceLROnPlateau reducing learning rate to 2.2888183792701966e-08.
Epoch 181/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 182/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 183/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 184/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 185/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 186/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 187/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 188/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 189/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 190/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00190: ReduceLROnPlateau reducing learning rate to 1.1444091896350983e-08.
Epoch 191/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 192/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 193/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 194/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 195/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 196/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 197/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 198/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 199/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 200/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00200: ReduceLROnPlateau reducing learning rate to 5.7220459481754915e-09.
Epoch 201/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 202/2000
236/236 [==============================] - ETA: 0s - loss: 0.6799 - accuracy: 0.62 - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 203/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 204/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 205/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 206/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 207/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 208/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 209/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 210/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00210: ReduceLROnPlateau reducing learning rate to 2.8610229740877458e-09.
Epoch 211/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 212/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 213/2000
236/236 [==============================] - 0s 148us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 214/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 215/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 216/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 217/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 218/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 219/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 220/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00220: ReduceLROnPlateau reducing learning rate to 1.4305114870438729e-09.
Epoch 221/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 222/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 223/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 224/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 225/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 226/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 227/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 228/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 229/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 230/2000
236/236 [==============================] - ETA: 0s - loss: 0.4254 - accuracy: 0.81 - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00230: ReduceLROnPlateau reducing learning rate to 7.152557435219364e-10.
Epoch 231/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 232/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 233/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 234/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 235/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 236/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 237/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 238/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 239/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 240/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00240: ReduceLROnPlateau reducing learning rate to 3.576278717609682e-10.
Epoch 241/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 242/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 243/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 244/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 245/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 246/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 247/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 248/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 249/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 250/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00250: ReduceLROnPlateau reducing learning rate to 1.788139358804841e-10.
Epoch 251/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 252/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 253/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 254/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 255/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 256/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 257/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 258/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 259/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 260/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00260: ReduceLROnPlateau reducing learning rate to 8.940696794024205e-11.
Epoch 261/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 262/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 263/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 264/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 265/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 266/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 267/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 268/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 269/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 270/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00270: ReduceLROnPlateau reducing learning rate to 4.470348397012103e-11.
Epoch 271/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 272/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 273/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 274/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 275/2000
236/236 [==============================] - 0s 140us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 276/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 277/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 278/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 279/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 280/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00280: ReduceLROnPlateau reducing learning rate to 2.2351741985060514e-11.
Epoch 281/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 282/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 283/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 284/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 285/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 286/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 287/2000
236/236 [==============================] - 0s 85us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 288/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 289/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 290/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00290: ReduceLROnPlateau reducing learning rate to 1.1175870992530257e-11.
Epoch 291/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 292/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 293/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 294/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 295/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 296/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 297/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 298/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 299/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 300/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00300: ReduceLROnPlateau reducing learning rate to 5.5879354962651284e-12.
Epoch 301/2000
236/236 [==============================] - 0s 148us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 302/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 303/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 304/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 305/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 306/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 307/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 308/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 309/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 310/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00310: ReduceLROnPlateau reducing learning rate to 2.7939677481325642e-12.
Epoch 311/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 312/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 313/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 314/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 315/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 316/2000
236/236 [==============================] - ETA: 0s - loss: 0.5187 - accuracy: 0.75 - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 317/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 318/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 319/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 320/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00320: ReduceLROnPlateau reducing learning rate to 1.3969838740662821e-12.
Epoch 321/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 322/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 323/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 324/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 325/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 326/2000
236/236 [==============================] - 0s 144us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 327/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 328/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 329/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 330/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00330: ReduceLROnPlateau reducing learning rate to 6.984919370331411e-13.
Epoch 331/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 332/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 333/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 334/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 335/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 336/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 337/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 338/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 339/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 340/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00340: ReduceLROnPlateau reducing learning rate to 3.4924596851657053e-13.
Epoch 341/2000
236/236 [==============================] - 0s 140us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 342/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 343/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 344/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 345/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 346/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 347/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 348/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 349/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 350/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00350: ReduceLROnPlateau reducing learning rate to 1.7462298425828526e-13.
Epoch 351/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 352/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 353/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 354/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 355/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 356/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 357/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 358/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 359/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 360/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00360: ReduceLROnPlateau reducing learning rate to 8.731149212914263e-14.
Epoch 361/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 362/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 363/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 364/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 365/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 366/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 367/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 368/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 369/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 370/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00370: ReduceLROnPlateau reducing learning rate to 4.3655746064571316e-14.
Epoch 371/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 372/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 373/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 374/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 375/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 376/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 377/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 378/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 379/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 380/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00380: ReduceLROnPlateau reducing learning rate to 2.1827873032285658e-14.
Epoch 381/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 382/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 383/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 384/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 385/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 386/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 387/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 388/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 389/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 390/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00390: ReduceLROnPlateau reducing learning rate to 1.0913936516142829e-14.
Epoch 391/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 392/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 393/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 394/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 395/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 396/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 397/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 398/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 399/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 400/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00400: ReduceLROnPlateau reducing learning rate to 5.4569682580714145e-15.
Epoch 401/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 402/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 403/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 404/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 405/2000
236/236 [==============================] - 0s 140us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 406/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 407/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 408/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 409/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 410/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00410: ReduceLROnPlateau reducing learning rate to 2.7284841290357072e-15.
Epoch 411/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 412/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 413/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 414/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 415/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 416/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 417/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 418/2000
236/236 [==============================] - ETA: 0s - loss: 0.4688 - accuracy: 0.81 - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 419/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 420/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00420: ReduceLROnPlateau reducing learning rate to 1.3642420645178536e-15.
Epoch 421/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 422/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 423/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 424/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 425/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 426/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 427/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 428/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 429/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 430/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00430: ReduceLROnPlateau reducing learning rate to 6.821210322589268e-16.
Epoch 431/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 432/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 433/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 434/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 435/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 436/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 437/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 438/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 439/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 440/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00440: ReduceLROnPlateau reducing learning rate to 3.410605161294634e-16.
Epoch 441/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 442/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 443/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 444/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 445/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 446/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 447/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 448/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 449/2000
236/236 [==============================] - ETA: 0s - loss: 0.5581 - accuracy: 0.71 - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 450/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00450: ReduceLROnPlateau reducing learning rate to 1.705302580647317e-16.
Epoch 451/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 452/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 453/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 454/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 455/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 456/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 457/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 458/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 459/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 460/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00460: ReduceLROnPlateau reducing learning rate to 8.526512903236585e-17.
Epoch 461/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 462/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 463/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 464/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 465/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 466/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 467/2000
236/236 [==============================] - 0s 220us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 468/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 469/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 470/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00470: ReduceLROnPlateau reducing learning rate to 4.2632564516182926e-17.
Epoch 471/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 472/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 473/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 474/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 475/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 476/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 477/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 478/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 479/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 480/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00480: ReduceLROnPlateau reducing learning rate to 2.1316282258091463e-17.
Epoch 481/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 482/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 483/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 484/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 485/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 486/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 487/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 488/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 489/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 490/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00490: ReduceLROnPlateau reducing learning rate to 1.0658141129045731e-17.
Epoch 491/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 492/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 493/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 494/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 495/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 496/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 497/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 498/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 499/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 500/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00500: ReduceLROnPlateau reducing learning rate to 5.329070564522866e-18.
Epoch 501/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 502/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 503/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 504/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 505/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 506/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 507/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 508/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 509/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 510/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00510: ReduceLROnPlateau reducing learning rate to 2.664535282261433e-18.
Epoch 511/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 512/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 513/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 514/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 515/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 516/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 517/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 518/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 519/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 520/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00520: ReduceLROnPlateau reducing learning rate to 1.3322676411307164e-18.
Epoch 521/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 522/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 523/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 524/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 525/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 526/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 527/2000
236/236 [==============================] - 0s 148us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 528/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 529/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 530/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00530: ReduceLROnPlateau reducing learning rate to 6.661338205653582e-19.
Epoch 531/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 532/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 533/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 534/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 535/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 536/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 537/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 538/2000
236/236 [==============================] - 0s 165us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 539/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 540/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00540: ReduceLROnPlateau reducing learning rate to 3.330669102826791e-19.
Epoch 541/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 542/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 543/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 544/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 545/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 546/2000
236/236 [==============================] - 0s 220us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 547/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 548/2000
236/236 [==============================] - 0s 144us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 549/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 550/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00550: ReduceLROnPlateau reducing learning rate to 1.6653345514133955e-19.
Epoch 551/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 552/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 553/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 554/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 555/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 556/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 557/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 558/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 559/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 560/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00560: ReduceLROnPlateau reducing learning rate to 8.326672757066978e-20.
Epoch 561/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 562/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 563/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 564/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 565/2000
236/236 [==============================] - 0s 144us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 566/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 567/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 568/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 569/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 570/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00570: ReduceLROnPlateau reducing learning rate to 4.163336378533489e-20.
Epoch 571/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 572/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 573/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 574/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 575/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 576/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 577/2000
236/236 [==============================] - ETA: 0s - loss: 0.6339 - accuracy: 0.59 - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 578/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 579/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 580/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00580: ReduceLROnPlateau reducing learning rate to 2.0816681892667444e-20.
Epoch 581/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 582/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 583/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 584/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 585/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 586/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 587/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 588/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 589/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 590/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00590: ReduceLROnPlateau reducing learning rate to 1.0408340946333722e-20.
Epoch 591/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 592/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 593/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 594/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 595/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 596/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 597/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 598/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 599/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 600/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00600: ReduceLROnPlateau reducing learning rate to 5.204170473166861e-21.
Epoch 601/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 602/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 603/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 604/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 605/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 606/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 607/2000
236/236 [==============================] - 0s 140us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 608/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 609/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 610/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00610: ReduceLROnPlateau reducing learning rate to 2.6020852365834305e-21.
Epoch 611/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 612/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 613/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 614/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 615/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 616/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 617/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 618/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 619/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 620/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00620: ReduceLROnPlateau reducing learning rate to 1.3010426182917153e-21.
Epoch 621/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 622/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 623/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 624/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 625/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 626/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 627/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 628/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 629/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 630/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00630: ReduceLROnPlateau reducing learning rate to 6.505213091458576e-22.
Epoch 631/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 632/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 633/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 634/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 635/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 636/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 637/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 638/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 639/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 640/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00640: ReduceLROnPlateau reducing learning rate to 3.252606545729288e-22.
Epoch 641/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 642/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 643/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 644/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 645/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 646/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 647/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 648/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 649/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 650/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00650: ReduceLROnPlateau reducing learning rate to 1.626303272864644e-22.
Epoch 651/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 652/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 653/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 654/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 655/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 656/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 657/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 658/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 659/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 660/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00660: ReduceLROnPlateau reducing learning rate to 8.13151636432322e-23.
Epoch 661/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 662/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 663/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 664/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 665/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 666/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 667/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 668/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 669/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 670/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00670: ReduceLROnPlateau reducing learning rate to 4.06575818216161e-23.
Epoch 671/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 672/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 673/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 674/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 675/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 676/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 677/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 678/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 679/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 680/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00680: ReduceLROnPlateau reducing learning rate to 2.032879091080805e-23.
Epoch 681/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 682/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 683/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 684/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 685/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 686/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 687/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 688/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 689/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 690/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00690: ReduceLROnPlateau reducing learning rate to 1.0164395455404025e-23.
Epoch 691/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 692/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 693/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 694/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 695/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 696/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 697/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 698/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 699/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 700/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00700: ReduceLROnPlateau reducing learning rate to 5.082197727702013e-24.
Epoch 701/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 702/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 703/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 704/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 705/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 706/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 707/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 708/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 709/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 710/2000
236/236 [==============================] - 0s 148us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00710: ReduceLROnPlateau reducing learning rate to 2.5410988638510064e-24.
Epoch 711/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 712/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 713/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 714/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 715/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 716/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 717/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 718/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 719/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 720/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00720: ReduceLROnPlateau reducing learning rate to 1.2705494319255032e-24.
Epoch 721/2000
236/236 [==============================] - 0s 140us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 722/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 723/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 724/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 725/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 726/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 727/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 728/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 729/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 730/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00730: ReduceLROnPlateau reducing learning rate to 6.352747159627516e-25.
Epoch 731/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 732/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 733/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 734/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 735/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 736/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 737/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 738/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 739/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 740/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00740: ReduceLROnPlateau reducing learning rate to 3.176373579813758e-25.
Epoch 741/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 742/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 743/2000
236/236 [==============================] - 0s 140us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 744/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 745/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 746/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 747/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 748/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 749/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 750/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00750: ReduceLROnPlateau reducing learning rate to 1.588186789906879e-25.
Epoch 751/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 752/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 753/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 754/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 755/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 756/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 757/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 758/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 759/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 760/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00760: ReduceLROnPlateau reducing learning rate to 7.940933949534395e-26.
Epoch 761/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 762/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 763/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 764/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 765/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 766/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 767/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 768/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 769/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 770/2000
236/236 [==============================] - 0s 144us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00770: ReduceLROnPlateau reducing learning rate to 3.9704669747671974e-26.
Epoch 771/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 772/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 773/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 774/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 775/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 776/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 777/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 778/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 779/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 780/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00780: ReduceLROnPlateau reducing learning rate to 1.9852334873835987e-26.
Epoch 781/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 782/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 783/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 784/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 785/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 786/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 787/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 788/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 789/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 790/2000
236/236 [==============================] - ETA: 0s - loss: 0.5123 - accuracy: 0.81 - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00790: ReduceLROnPlateau reducing learning rate to 9.926167436917994e-27.
Epoch 791/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 792/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 793/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 794/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 795/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 796/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 797/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 798/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 799/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 800/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00800: ReduceLROnPlateau reducing learning rate to 4.963083718458997e-27.
Epoch 801/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 802/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 803/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 804/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 805/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 806/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 807/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 808/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 809/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 810/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00810: ReduceLROnPlateau reducing learning rate to 2.4815418592294984e-27.
Epoch 811/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 812/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 813/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 814/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 815/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 816/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 817/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 818/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 819/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 820/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00820: ReduceLROnPlateau reducing learning rate to 1.2407709296147492e-27.
Epoch 821/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 822/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 823/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 824/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 825/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 826/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 827/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 828/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 829/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 830/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00830: ReduceLROnPlateau reducing learning rate to 6.203854648073746e-28.
Epoch 831/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 832/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 833/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 834/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 835/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 836/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 837/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 838/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 839/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 840/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00840: ReduceLROnPlateau reducing learning rate to 3.101927324036873e-28.
Epoch 841/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 842/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 843/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 844/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 845/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 846/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 847/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 848/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 849/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 850/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00850: ReduceLROnPlateau reducing learning rate to 1.5509636620184365e-28.
Epoch 851/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 852/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 853/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 854/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 855/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 856/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 857/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 858/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 859/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 860/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00860: ReduceLROnPlateau reducing learning rate to 7.754818310092183e-29.
Epoch 861/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 862/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 863/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 864/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 865/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 866/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 867/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 868/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 869/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 870/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00870: ReduceLROnPlateau reducing learning rate to 3.877409155046091e-29.
Epoch 871/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 872/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 873/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 874/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 875/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 876/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 877/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 878/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 879/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 880/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00880: ReduceLROnPlateau reducing learning rate to 1.9387045775230456e-29.
Epoch 881/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 882/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 883/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 884/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 885/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 886/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 887/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 888/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 889/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 890/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00890: ReduceLROnPlateau reducing learning rate to 9.693522887615228e-30.
Epoch 891/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 892/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 893/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 894/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 895/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 896/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 897/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 898/2000
236/236 [==============================] - ETA: 0s - loss: 0.5093 - accuracy: 0.75 - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 899/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 900/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00900: ReduceLROnPlateau reducing learning rate to 4.846761443807614e-30.
Epoch 901/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 902/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 903/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 904/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 905/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 906/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 907/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 908/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 909/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 910/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00910: ReduceLROnPlateau reducing learning rate to 2.423380721903807e-30.
Epoch 911/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 912/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 913/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 914/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 915/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 916/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 917/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 918/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 919/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 920/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00920: ReduceLROnPlateau reducing learning rate to 1.2116903609519035e-30.
Epoch 921/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 922/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 923/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 924/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 925/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 926/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 927/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 928/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 929/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 930/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00930: ReduceLROnPlateau reducing learning rate to 6.058451804759518e-31.
Epoch 931/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 932/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 933/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 934/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 935/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 936/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 937/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 938/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 939/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 940/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00940: ReduceLROnPlateau reducing learning rate to 3.029225902379759e-31.
Epoch 941/2000
236/236 [==============================] - 0s 148us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 942/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 943/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 944/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 945/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 946/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 947/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 948/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 949/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 950/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00950: ReduceLROnPlateau reducing learning rate to 1.5146129511898794e-31.
Epoch 951/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 952/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 953/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 954/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 955/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 956/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 957/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 958/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 959/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 960/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00960: ReduceLROnPlateau reducing learning rate to 7.573064755949397e-32.
Epoch 961/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 962/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 963/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 964/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 965/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 966/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 967/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 968/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 969/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 970/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00970: ReduceLROnPlateau reducing learning rate to 3.7865323779746985e-32.
Epoch 971/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 972/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 973/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 974/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 975/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 976/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 977/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 978/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 979/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 980/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00980: ReduceLROnPlateau reducing learning rate to 1.8932661889873492e-32.
Epoch 981/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 982/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 983/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 984/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 985/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 986/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 987/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 988/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 989/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 990/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 00990: ReduceLROnPlateau reducing learning rate to 9.466330944936746e-33.
Epoch 991/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 992/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 993/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 994/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 995/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 996/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 997/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 998/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 999/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1000/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01000: ReduceLROnPlateau reducing learning rate to 4.733165472468373e-33.
Epoch 1001/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1002/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1003/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1004/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1005/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1006/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1007/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1008/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1009/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1010/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01010: ReduceLROnPlateau reducing learning rate to 2.3665827362341866e-33.
Epoch 1011/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1012/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1013/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1014/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1015/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1016/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1017/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1018/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1019/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1020/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01020: ReduceLROnPlateau reducing learning rate to 1.1832913681170933e-33.
Epoch 1021/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1022/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1023/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1024/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1025/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1026/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1027/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1028/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1029/2000
236/236 [==============================] - 0s 178us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1030/2000
236/236 [==============================] - 0s 271us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01030: ReduceLROnPlateau reducing learning rate to 5.916456840585466e-34.
Epoch 1031/2000
236/236 [==============================] - 0s 246us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1032/2000
236/236 [==============================] - 0s 148us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1033/2000
236/236 [==============================] - 0s 237us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1034/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1035/2000
236/236 [==============================] - 0s 182us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1036/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1037/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1038/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1039/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1040/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01040: ReduceLROnPlateau reducing learning rate to 2.958228420292733e-34.
Epoch 1041/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1042/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1043/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1044/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1045/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1046/2000
236/236 [==============================] - ETA: 0s - loss: 0.5555 - accuracy: 0.71 - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1047/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1048/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1049/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1050/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01050: ReduceLROnPlateau reducing learning rate to 1.4791142101463666e-34.
Epoch 1051/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1052/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1053/2000
236/236 [==============================] - 0s 186us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1054/2000
236/236 [==============================] - 0s 178us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1055/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1056/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1057/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1058/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1059/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1060/2000
236/236 [==============================] - 0s 140us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01060: ReduceLROnPlateau reducing learning rate to 7.395571050731833e-35.
Epoch 1061/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1062/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1063/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1064/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1065/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1066/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1067/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1068/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1069/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1070/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01070: ReduceLROnPlateau reducing learning rate to 3.6977855253659165e-35.
Epoch 1071/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1072/2000
236/236 [==============================] - 0s 144us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1073/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1074/2000
236/236 [==============================] - ETA: 0s - loss: 0.5019 - accuracy: 0.75 - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1075/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1076/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1077/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1078/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1079/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1080/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01080: ReduceLROnPlateau reducing learning rate to 1.8488927626829582e-35.
Epoch 1081/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1082/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1083/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1084/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1085/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1086/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1087/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1088/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1089/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1090/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01090: ReduceLROnPlateau reducing learning rate to 9.244463813414791e-36.
Epoch 1091/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1092/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1093/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1094/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1095/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1096/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1097/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1098/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1099/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1100/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01100: ReduceLROnPlateau reducing learning rate to 4.6222319067073956e-36.
Epoch 1101/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1102/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1103/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1104/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1105/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1106/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1107/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1108/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1109/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1110/2000
236/236 [==============================] - ETA: 0s - loss: 0.4339 - accuracy: 0.84 - 0s 157us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01110: ReduceLROnPlateau reducing learning rate to 2.3111159533536978e-36.
Epoch 1111/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1112/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1113/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1114/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1115/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1116/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1117/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1118/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1119/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1120/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01120: ReduceLROnPlateau reducing learning rate to 1.1555579766768489e-36.
Epoch 1121/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1122/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1123/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1124/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1125/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1126/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1127/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1128/2000
236/236 [==============================] - 0s 250us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1129/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1130/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01130: ReduceLROnPlateau reducing learning rate to 5.7777898833842445e-37.
Epoch 1131/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1132/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1133/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1134/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1135/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1136/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1137/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1138/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1139/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1140/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01140: ReduceLROnPlateau reducing learning rate to 2.8888949416921223e-37.
Epoch 1141/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1142/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1143/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1144/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1145/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1146/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1147/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1148/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1149/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1150/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01150: ReduceLROnPlateau reducing learning rate to 1.4444474708460611e-37.
Epoch 1151/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1152/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1153/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1154/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1155/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1156/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1157/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1158/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1159/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1160/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01160: ReduceLROnPlateau reducing learning rate to 7.222237354230306e-38.
Epoch 1161/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1162/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1163/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1164/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1165/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1166/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1167/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1168/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1169/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1170/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01170: ReduceLROnPlateau reducing learning rate to 3.611118677115153e-38.
Epoch 1171/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1172/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1173/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1174/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1175/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1176/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1177/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1178/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1179/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1180/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01180: ReduceLROnPlateau reducing learning rate to 1.8055593385575764e-38.
Epoch 1181/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1182/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1183/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1184/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1185/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1186/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1187/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1188/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1189/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1190/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01190: ReduceLROnPlateau reducing learning rate to 9.027796692787882e-39.
Epoch 1191/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1192/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1193/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1194/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1195/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1196/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1197/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1198/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1199/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1200/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01200: ReduceLROnPlateau reducing learning rate to 4.513898346393941e-39.
Epoch 1201/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1202/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1203/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1204/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1205/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1206/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1207/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1208/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1209/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1210/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01210: ReduceLROnPlateau reducing learning rate to 2.2569495235215866e-39.
Epoch 1211/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1212/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1213/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1214/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1215/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1216/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1217/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1218/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1219/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1220/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01220: ReduceLROnPlateau reducing learning rate to 1.1284747617607933e-39.
Epoch 1221/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1222/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1223/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1224/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1225/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1226/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1227/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1228/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1229/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1230/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01230: ReduceLROnPlateau reducing learning rate to 5.642370305557806e-40.
Epoch 1231/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1232/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1233/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1234/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1235/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1236/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1237/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1238/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1239/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1240/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01240: ReduceLROnPlateau reducing learning rate to 2.821185152778903e-40.
Epoch 1241/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1242/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1243/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1244/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1245/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1246/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1247/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1248/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1249/2000
236/236 [==============================] - ETA: 0s - loss: 0.6118 - accuracy: 0.53 - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1250/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01250: ReduceLROnPlateau reducing learning rate to 1.4105890731432906e-40.
Epoch 1251/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1252/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1253/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1254/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1255/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1256/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1257/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1258/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1259/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1260/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01260: ReduceLROnPlateau reducing learning rate to 7.052945365716453e-41.
Epoch 1261/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1262/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1263/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1264/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1265/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1266/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1267/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1268/2000
236/236 [==============================] - ETA: 0s - loss: 0.5074 - accuracy: 0.75 - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1269/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1270/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01270: ReduceLROnPlateau reducing learning rate to 3.5265077153198346e-41.
Epoch 1271/2000
236/236 [==============================] - 0s 174us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1272/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1273/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1274/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1275/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1276/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1277/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1278/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1279/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1280/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01280: ReduceLROnPlateau reducing learning rate to 1.7632538576599173e-41.
Epoch 1281/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1282/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1283/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1284/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1285/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1286/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1287/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1288/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1289/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1290/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01290: ReduceLROnPlateau reducing learning rate to 8.816269288299587e-42.
Epoch 1291/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1292/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1293/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1294/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1295/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1296/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1297/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1298/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1299/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1300/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01300: ReduceLROnPlateau reducing learning rate to 4.4084849687658745e-42.
Epoch 1301/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1302/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1303/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1304/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1305/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1306/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1307/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1308/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1309/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1310/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01310: ReduceLROnPlateau reducing learning rate to 2.2042424843829373e-42.
Epoch 1311/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1312/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1313/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1314/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1315/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1316/2000
236/236 [==============================] - 0s 161us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1317/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1318/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1319/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1320/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01320: ReduceLROnPlateau reducing learning rate to 1.1021212421914686e-42.
Epoch 1321/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1322/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1323/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1324/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1325/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1326/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1327/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1328/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1329/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1330/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01330: ReduceLROnPlateau reducing learning rate to 5.507102964796531e-43.
Epoch 1331/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1332/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1333/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1334/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1335/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1336/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1337/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1338/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1339/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1340/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01340: ReduceLROnPlateau reducing learning rate to 2.7535514823982655e-43.
Epoch 1341/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1342/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1343/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1344/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1345/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1346/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1347/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1348/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1349/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1350/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01350: ReduceLROnPlateau reducing learning rate to 1.3732724950383207e-43.
Epoch 1351/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1352/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1353/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1354/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1355/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1356/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1357/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1358/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1359/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1360/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01360: ReduceLROnPlateau reducing learning rate to 6.866362475191604e-44.
Epoch 1361/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1362/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1363/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1364/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1365/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1366/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1367/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1368/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1369/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1370/2000
236/236 [==============================] - 0s 152us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01370: ReduceLROnPlateau reducing learning rate to 3.433181237595802e-44.
Epoch 1371/2000
236/236 [==============================] - 0s 140us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1372/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1373/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1374/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1375/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1376/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1377/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1378/2000
236/236 [==============================] - 0s 152us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1379/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1380/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01380: ReduceLROnPlateau reducing learning rate to 1.6815581571897805e-44.
Epoch 1381/2000
236/236 [==============================] - 0s 165us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1382/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1383/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1384/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1385/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1386/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1387/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1388/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1389/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1390/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01390: ReduceLROnPlateau reducing learning rate to 8.407790785948902e-45.
Epoch 1391/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1392/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1393/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1394/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1395/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1396/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1397/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1398/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1399/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1400/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01400: ReduceLROnPlateau reducing learning rate to 4.203895392974451e-45.
Epoch 1401/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1402/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1403/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1404/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1405/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1406/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1407/2000
236/236 [==============================] - 0s 140us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1408/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1409/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1410/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01410: ReduceLROnPlateau reducing learning rate to 2.1019476964872256e-45.
Epoch 1411/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1412/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1413/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1414/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1415/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1416/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1417/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1418/2000
236/236 [==============================] - 0s 161us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1419/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1420/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01420: ReduceLROnPlateau reducing learning rate to 1.401298464324817e-45.
Epoch 1421/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1422/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1423/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1424/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1425/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1426/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1427/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1428/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1429/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1430/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468

Epoch 01430: ReduceLROnPlateau reducing learning rate to 7.006492321624085e-46.
Epoch 1431/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1432/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1433/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1434/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1435/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1436/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1437/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1438/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1439/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1440/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1441/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1442/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1443/2000
236/236 [==============================] - 0s 85us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1444/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1445/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1446/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1447/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1448/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1449/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1450/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1451/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1452/2000
236/236 [==============================] - 0s 174us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1453/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1454/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1455/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1456/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1457/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1458/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1459/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1460/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1461/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1462/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1463/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1464/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1465/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1466/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1467/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1468/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1469/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1470/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1471/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1472/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1473/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1474/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1475/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1476/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1477/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1478/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1479/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1480/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1481/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1482/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1483/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1484/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1485/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1486/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1487/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1488/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1489/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1490/2000
236/236 [==============================] - 0s 157us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1491/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1492/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1493/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1494/2000
236/236 [==============================] - 0s 144us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1495/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1496/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1497/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1498/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1499/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1500/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1501/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1502/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1503/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1504/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1505/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1506/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1507/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1508/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1509/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1510/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1511/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1512/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1513/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1514/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1515/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1516/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1517/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1518/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1519/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1520/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1521/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1522/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1523/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1524/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1525/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1526/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1527/2000
236/236 [==============================] - 0s 258us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1528/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1529/2000
236/236 [==============================] - 0s 157us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1530/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1531/2000
236/236 [==============================] - 0s 157us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1532/2000
236/236 [==============================] - 0s 165us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1533/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1534/2000
236/236 [==============================] - 0s 169us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1535/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1536/2000
236/236 [==============================] - 0s 208us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1537/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1538/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1539/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1540/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1541/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1542/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1543/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1544/2000
236/236 [==============================] - 0s 254us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1545/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1546/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1547/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1548/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1549/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1550/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1551/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1552/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1553/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1554/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1555/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1556/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1557/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1558/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1559/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1560/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1561/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1562/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1563/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1564/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1565/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1566/2000
236/236 [==============================] - 0s 148us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1567/2000
236/236 [==============================] - 0s 212us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1568/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1569/2000
236/236 [==============================] - 0s 165us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1570/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1571/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1572/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1573/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1574/2000
236/236 [==============================] - 0s 144us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1575/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1576/2000
236/236 [==============================] - 0s 220us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1577/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1578/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1579/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1580/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1581/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1582/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1583/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1584/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1585/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1586/2000
236/236 [==============================] - 0s 186us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1587/2000
236/236 [==============================] - 0s 169us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1588/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1589/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1590/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1591/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1592/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1593/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1594/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1595/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1596/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1597/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1598/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1599/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1600/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1601/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1602/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1603/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1604/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1605/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1606/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1607/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1608/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1609/2000
236/236 [==============================] - 0s 186us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1610/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1611/2000
236/236 [==============================] - 0s 169us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1612/2000
236/236 [==============================] - 0s 140us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1613/2000
236/236 [==============================] - 0s 140us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1614/2000
236/236 [==============================] - 0s 178us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1615/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1616/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1617/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1618/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1619/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1620/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1621/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1622/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1623/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1624/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1625/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1626/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1627/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1628/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1629/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1630/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1631/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1632/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1633/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1634/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1635/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1636/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1637/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1638/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1639/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1640/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1641/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1642/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1643/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1644/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1645/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1646/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1647/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1648/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1649/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1650/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1651/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1652/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1653/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1654/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1655/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1656/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1657/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1658/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1659/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1660/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1661/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1662/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1663/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1664/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1665/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1666/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1667/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1668/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1669/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1670/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1671/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1672/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1673/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1674/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1675/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1676/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1677/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1678/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1679/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1680/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1681/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1682/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1683/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1684/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1685/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1686/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1687/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1688/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1689/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1690/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1691/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1692/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1693/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1694/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1695/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1696/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1697/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1698/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1699/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1700/2000
236/236 [==============================] - ETA: 0s - loss: 0.4052 - accuracy: 0.87 - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1701/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1702/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1703/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1704/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1705/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1706/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1707/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1708/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1709/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1710/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1711/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1712/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1713/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1714/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1715/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1716/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1717/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1718/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1719/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1720/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1721/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1722/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1723/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1724/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1725/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1726/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1727/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1728/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1729/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1730/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1731/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1732/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1733/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1734/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1735/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1736/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1737/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1738/2000
236/236 [==============================] - ETA: 0s - loss: 0.6498 - accuracy: 0.71 - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1739/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1740/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1741/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1742/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1743/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1744/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1745/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1746/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1747/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1748/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1749/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1750/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1751/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1752/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1753/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1754/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1755/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1756/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1757/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1758/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1759/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1760/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1761/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1762/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1763/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1764/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1765/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1766/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1767/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1768/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1769/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1770/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1771/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1772/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1773/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1774/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1775/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1776/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1777/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1778/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1779/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1780/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1781/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1782/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1783/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1784/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1785/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1786/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1787/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1788/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1789/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1790/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1791/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1792/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1793/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1794/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1795/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1796/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1797/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1798/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1799/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1800/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1801/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1802/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1803/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1804/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1805/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1806/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1807/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1808/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1809/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1810/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1811/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1812/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1813/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1814/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1815/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1816/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1817/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1818/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1819/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1820/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1821/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1822/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1823/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1824/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1825/2000
236/236 [==============================] - 0s 131us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1826/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1827/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1828/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1829/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1830/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1831/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1832/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1833/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1834/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1835/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1836/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1837/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1838/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1839/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1840/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1841/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1842/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1843/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1844/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1845/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1846/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1847/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1848/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1849/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1850/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1851/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1852/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1853/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1854/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1855/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1856/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1857/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1858/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1859/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1860/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1861/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1862/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1863/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1864/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1865/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1866/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1867/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1868/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1869/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1870/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1871/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1872/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1873/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1874/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1875/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1876/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1877/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1878/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1879/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1880/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1881/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1882/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1883/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1884/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1885/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1886/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1887/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1888/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1889/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1890/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1891/2000
236/236 [==============================] - ETA: 0s - loss: 0.4209 - accuracy: 0.81 - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1892/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1893/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1894/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1895/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1896/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1897/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1898/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1899/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1900/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1901/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1902/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1903/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1904/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1905/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1906/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1907/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1908/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1909/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1910/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1911/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1912/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1913/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1914/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1915/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1916/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1917/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1918/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1919/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1920/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1921/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1922/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1923/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1924/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1925/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1926/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1927/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1928/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1929/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1930/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1931/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1932/2000
236/236 [==============================] - 0s 127us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1933/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1934/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1935/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1936/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1937/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1938/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1939/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1940/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1941/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1942/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1943/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1944/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1945/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1946/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1947/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1948/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1949/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1950/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1951/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1952/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1953/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1954/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1955/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1956/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1957/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1958/2000
236/236 [==============================] - 0s 93us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1959/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1960/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1961/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1962/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1963/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1964/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1965/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1966/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1967/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1968/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1969/2000
236/236 [==============================] - 0s 136us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1970/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1971/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1972/2000
236/236 [==============================] - 0s 119us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1973/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1974/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1975/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1976/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1977/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1978/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1979/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1980/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1981/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1982/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1983/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1984/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1985/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1986/2000
236/236 [==============================] - 0s 123us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1987/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1988/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1989/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1990/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1991/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1992/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1993/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1994/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1995/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1996/2000
236/236 [==============================] - 0s 110us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1997/2000
236/236 [==============================] - 0s 106us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1998/2000
236/236 [==============================] - 0s 114us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 1999/2000
236/236 [==============================] - 0s 97us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
Epoch 2000/2000
236/236 [==============================] - 0s 102us/step - loss: 0.5239 - accuracy: 0.7458 - val_loss: 0.4718 - val_accuracy: 0.7468
In [37]:
# Plot the training history: accuracy and loss curves, training vs. validation.
# NOTE(review): assumes `history` is the Keras History object returned by
# model.fit(...) in an earlier cell.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))  # one point per training epoch

# Accuracy curves (stray debug `print(epochs)` removed — it only printed
# the bare `range(0, 2000)` repr into the output).
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

# Loss curves
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
range(0, 2000)
In [38]:
# Evaluate the trained model on the held-out test set and report both metrics.
# NOTE(review): assumes `model`, `X_test`, `y_test` come from earlier cells.
test_loss, test_acc = model.evaluate(X_test, y_test)
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
79/79 [==============================] - 0s 63us/step
test loss: 0.471761223636096, test accuracy: 0.746835470199585
In [40]:
# Predict on the test set and report classification metrics.
# Kappa/confusion matrix need hard labels; AUC uses the raw scores.
y_pred = model.predict(X_test)
# Threshold at 0.5 to get hard class labels. Vectorized numpy comparison
# replaces the original Python-level map/lambda over the prediction array;
# the resulting 0/1 values are identical.
y_pred_d = (np.asarray(y_pred) >= 0.5).astype(int).ravel()
print("Kappa: ",cohen_kappa_score(y_test, y_pred_d))
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred_d))
Kappa:  0.059523809523809534
AUC ROC:  0.7723132969034608
[[57  4]
 [16  2]]

KMeans clustering of the MFCC features

In [102]:
X
Out[102]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13
0 0.297583 1.225637 -0.367641 0.606499 0.072373 -2.029620 0.791469 0.752018 2.268802 -1.383289 0.548279 1.903211 -1.011470
1 0.637676 -1.507256 -1.572737 -0.954161 -0.857425 0.327005 0.816764 0.214245 0.241703 0.637066 1.601538 0.300317 -0.466779
2 2.236730 -0.319414 0.669910 -1.918119 -0.820882 -2.379333 -1.570021 -2.755344 -2.150610 -2.528577 -0.877081 -0.522248 -1.429911
3 0.662077 -0.381499 0.111981 -1.743808 -1.317593 -1.348534 -0.627198 -1.629882 -2.075974 -1.248765 -1.126014 -1.316359 -1.126174
4 0.736502 0.112932 -0.065024 -1.049458 -0.408043 -0.437499 0.090831 -0.852983 -1.922491 -0.284365 0.210624 -0.032122 -0.700183
5 2.044945 -1.519304 -0.247242 -1.575018 -3.066163 -3.318978 -4.228656 -1.698572 -1.328177 -1.585685 -2.068965 -1.981423 -2.570963
6 -0.163510 0.463882 0.433775 0.607952 -1.213428 0.398472 0.549864 -0.956001 -2.046092 0.848843 0.361883 -0.369335 0.023466
7 0.469947 -0.206445 -0.861641 -1.194743 -1.342011 -0.537321 -0.618801 0.066311 1.216468 2.424672 0.407859 -0.971317 -0.885313
8 0.884810 -2.954758 0.310482 -0.346234 -0.339963 -0.215837 -0.238824 -0.154440 -0.038298 0.157431 -0.417738 -0.346934 -0.136933
9 -0.966889 1.065377 -0.166673 -0.580533 0.526040 0.508878 1.073663 0.967893 0.083588 0.138440 -0.514643 0.318418 0.368555
10 0.322674 -1.252898 -1.332613 -0.629526 -1.062802 -0.073310 -0.754358 -0.444217 -0.260065 0.586226 -0.084312 0.912315 0.392903
11 -0.198884 0.112823 0.240285 0.472717 -0.142430 0.364603 0.036949 0.383315 0.567300 -0.213906 -0.666284 0.251886 0.001530
12 -0.704344 0.127023 0.716665 -0.403523 -0.515524 -0.315358 0.189853 -0.589153 -0.264368 -0.267752 -0.195456 0.069763 0.028519
13 -0.798804 -0.117919 -0.357717 0.064237 -1.152914 -0.559676 -0.651647 -0.039909 -0.177030 0.500161 -0.273613 0.258679 0.790680
14 -0.078167 -0.317301 0.539934 0.021348 -0.238394 -1.228167 -0.600813 0.330424 -0.715325 0.747390 -0.474837 -0.755240 -0.089072
15 0.239636 0.763042 0.457000 0.194748 -0.448390 -1.142082 -1.844178 0.710890 -0.411874 -0.048000 0.959817 1.034801 -0.488467
16 0.593005 -0.028653 0.182359 -0.006019 0.143398 -0.749865 0.313923 0.942236 0.207952 -0.901225 -0.147923 0.552419 -1.176684
17 -0.565089 -0.734146 0.115715 0.669649 0.299612 -0.880228 0.070142 -0.683059 -0.693274 0.872570 1.622547 -0.210766 0.369132
18 -0.286584 -1.165487 -0.511773 0.317715 0.517783 -1.379822 0.371120 0.010478 -0.697242 0.652545 -0.084622 -0.479999 1.158310
19 -0.493038 -0.532596 0.330444 0.672695 0.252734 -0.921397 -1.266250 0.422426 1.086459 2.009229 2.043563 0.507131 0.412127
20 1.367128 -0.497386 -0.060729 0.760778 -1.140772 0.151619 -1.896890 -3.328933 -1.164662 -1.274380 0.647583 1.589549 0.963561
21 0.073944 0.076483 0.128736 0.807417 -1.111030 -0.134221 -1.151001 -1.658868 -1.130705 -0.105905 -0.047689 -0.710169 1.125641
22 0.660587 -0.157368 -0.327968 0.214143 -1.218601 -0.164400 -1.547244 -1.835812 -1.428252 -0.025563 0.297802 -1.007616 0.493018
23 0.629501 0.686317 0.608046 -0.676736 -0.888682 0.627145 -1.099783 0.193809 -0.138455 -2.486797 -0.846714 -2.218162 -0.746468
24 -0.898063 2.104109 0.850318 1.640696 0.809305 1.055106 -0.219443 0.333777 -0.393837 0.524945 0.726660 -1.038018 0.706289
25 -1.139909 1.564949 0.224228 1.291732 0.521761 0.566571 -0.502128 0.174052 -0.668503 0.525822 0.608177 -0.929238 0.913787
26 -0.670112 0.400455 0.542697 0.702795 -0.312340 0.610331 0.039142 0.210504 -0.147396 0.558589 -0.037638 -1.606275 -0.202252
27 -0.698001 -0.051148 0.596315 -0.686050 -0.524681 0.775281 -0.431256 0.459523 0.649406 -1.709659 0.820167 -1.716683 -0.365401
28 0.267840 0.993382 -0.731513 -0.033601 -0.525393 0.229122 -0.296022 1.310078 0.644052 -1.340101 1.103501 0.466996 0.540105
29 -1.497035 -1.682687 -2.317594 -1.006384 -1.191688 -1.739630 -0.583086 0.367213 0.619588 -0.474930 -0.553418 -0.278936 -0.651289
... ... ... ... ... ... ... ... ... ... ... ... ... ...
285 0.054160 0.170817 0.428157 -0.173562 0.450572 0.340002 0.490456 0.213782 -0.773382 -0.563043 0.363713 -0.075376 0.254277
286 -0.837236 -0.153388 0.268665 1.306860 1.166541 1.445964 1.199082 -0.371965 -0.308775 -0.231117 0.135108 -0.309420 -0.311043
287 0.054272 0.092607 -0.112145 0.149520 -0.877907 0.167855 0.850451 0.678919 -0.080733 -0.661900 -0.257044 -0.389926 -1.512126
288 1.304506 -0.939417 -1.006008 1.020838 -1.215004 0.813775 1.376113 -1.476023 -2.020203 0.337259 2.404807 -1.045089 0.294137
289 -0.159584 -0.189562 -0.163770 0.245326 -0.612664 0.540468 0.129258 -1.140293 -1.534282 -0.446273 1.154468 0.496372 -0.844209
290 -0.506257 -0.132656 -0.153020 -0.443656 -1.058433 -1.070196 -1.502266 -0.228195 0.011621 0.395647 -0.043527 -0.074259 -0.944997
291 0.434058 0.398883 0.528841 1.249494 -0.183209 0.484735 0.107864 -0.036215 0.158756 0.063774 -0.015594 0.089898 -0.386434
292 0.539818 0.967349 0.496940 0.788583 0.455748 -0.003488 0.155054 0.250439 0.033537 -0.103080 0.763456 0.120334 -0.561083
293 0.273525 -0.141184 0.986558 0.184194 -1.414155 -0.887289 1.438143 0.760252 -0.135023 2.252775 1.900066 -0.624009 1.138131
294 1.757881 -0.657600 -0.123573 1.713741 0.702842 1.053753 0.625178 -0.475339 -0.351226 0.392355 0.055097 -0.855496 0.261994
295 1.094424 -0.403254 -0.422590 1.461920 0.799118 0.835687 0.944428 -0.017134 -0.567998 0.548997 -0.402101 -1.351402 0.844210
296 0.456168 0.097137 0.082277 0.474571 -0.507174 -0.722248 -1.600889 -0.346317 0.125499 -0.987583 -0.902738 -0.334857 0.502328
297 0.158964 1.272002 -0.348555 -0.114827 -0.295698 0.912253 -0.330632 0.873334 -0.284744 -1.295494 -0.870216 0.332550 1.474750
298 -0.259693 1.853691 0.322474 0.273480 -0.345550 0.076148 -0.807223 1.185136 0.497571 0.246526 0.604499 0.325991 0.648890
299 -2.384686 -0.883216 0.899645 1.092020 -0.445358 0.965983 0.519488 0.327030 0.842924 -0.068183 -0.353463 0.674622 0.616154
300 -1.592394 -0.161493 1.268663 1.545345 -0.785313 1.028442 0.029964 0.687974 0.585005 -0.271009 -0.309379 0.500772 0.919246
301 -1.609403 -0.213622 1.375684 1.572829 -0.631037 1.172929 0.158255 0.764985 0.633586 -0.171438 -0.369442 0.503642 1.007372
302 1.636269 0.260324 0.678085 -0.587416 -0.746366 -1.079248 -0.077932 0.638303 0.636743 0.407116 -0.026192 -0.277632 -0.299649
303 -0.468490 1.262367 1.213487 0.908453 0.995514 0.311472 0.225458 -0.034264 -0.759979 -0.553620 -0.608785 0.868899 0.308641
304 0.142289 1.055504 -0.095146 0.415827 0.418496 0.116729 0.275183 0.600879 0.346584 0.250517 0.332107 0.369426 -0.246906
305 1.335436 0.402811 -1.381643 -0.110506 0.678178 1.421056 -0.283357 0.434156 1.225881 1.955751 0.309648 -0.785677 -2.348680
306 3.500613 -0.323410 -0.934394 -3.041308 1.822358 3.047378 1.485650 2.669335 2.613811 1.719411 -1.458878 -5.545815 -3.654033
307 3.518855 -0.474530 -0.626225 -2.369853 1.940903 2.966850 1.650342 2.153435 2.683362 2.298387 -0.752895 -5.262247 -4.609324
308 -0.475987 0.385400 0.283385 0.721130 0.390121 0.767408 0.105529 1.024463 1.051642 0.737865 1.172001 0.939039 0.996757
309 1.579671 1.097062 0.916778 0.242411 1.350012 1.924193 1.269541 0.800714 0.639968 1.385723 -1.326387 0.948539 2.215496
310 -1.373143 0.545169 2.242534 0.151875 0.131935 1.422049 -0.049814 0.337962 1.705104 1.020922 1.432972 1.506929 0.326875
311 -0.555796 0.192435 -1.486600 -1.844351 -0.024486 1.541478 2.218745 1.405936 0.718192 -0.125397 -0.557874 -0.139931 1.048558
312 -0.017409 0.098378 -0.037330 0.351157 -0.857030 -1.081681 0.404292 1.175376 0.863678 1.257200 1.194305 1.918621 0.765312
313 -0.318014 0.774156 -0.322540 -0.221254 -0.285732 0.116133 0.130191 -0.130453 -0.192462 0.885042 1.494832 0.568910 -0.113303
314 -1.382512 0.949410 -0.199056 0.190953 -1.174835 -0.995885 0.048886 -0.304819 -1.529189 -1.023882 -1.763085 -0.975472 -0.705853

315 rows × 13 columns

In [103]:
# Attach the target labels as a new 'chosen' column on the feature frame so
# cluster assignments can later be compared against the selection label.
# NOTE(review): mutates X in place; assumes `y` aligns row-for-row with X.
X.loc[:,'chosen'] = list(y)
# Display the augmented frame.
X
Out[103]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13 chosen
0 0.297583 1.225637 -0.367641 0.606499 0.072373 -2.029620 0.791469 0.752018 2.268802 -1.383289 0.548279 1.903211 -1.011470 0
1 0.637676 -1.507256 -1.572737 -0.954161 -0.857425 0.327005 0.816764 0.214245 0.241703 0.637066 1.601538 0.300317 -0.466779 0
2 2.236730 -0.319414 0.669910 -1.918119 -0.820882 -2.379333 -1.570021 -2.755344 -2.150610 -2.528577 -0.877081 -0.522248 -1.429911 0
3 0.662077 -0.381499 0.111981 -1.743808 -1.317593 -1.348534 -0.627198 -1.629882 -2.075974 -1.248765 -1.126014 -1.316359 -1.126174 0
4 0.736502 0.112932 -0.065024 -1.049458 -0.408043 -0.437499 0.090831 -0.852983 -1.922491 -0.284365 0.210624 -0.032122 -0.700183 0
5 2.044945 -1.519304 -0.247242 -1.575018 -3.066163 -3.318978 -4.228656 -1.698572 -1.328177 -1.585685 -2.068965 -1.981423 -2.570963 0
6 -0.163510 0.463882 0.433775 0.607952 -1.213428 0.398472 0.549864 -0.956001 -2.046092 0.848843 0.361883 -0.369335 0.023466 0
7 0.469947 -0.206445 -0.861641 -1.194743 -1.342011 -0.537321 -0.618801 0.066311 1.216468 2.424672 0.407859 -0.971317 -0.885313 0
8 0.884810 -2.954758 0.310482 -0.346234 -0.339963 -0.215837 -0.238824 -0.154440 -0.038298 0.157431 -0.417738 -0.346934 -0.136933 0
9 -0.966889 1.065377 -0.166673 -0.580533 0.526040 0.508878 1.073663 0.967893 0.083588 0.138440 -0.514643 0.318418 0.368555 0
10 0.322674 -1.252898 -1.332613 -0.629526 -1.062802 -0.073310 -0.754358 -0.444217 -0.260065 0.586226 -0.084312 0.912315 0.392903 0
11 -0.198884 0.112823 0.240285 0.472717 -0.142430 0.364603 0.036949 0.383315 0.567300 -0.213906 -0.666284 0.251886 0.001530 0
12 -0.704344 0.127023 0.716665 -0.403523 -0.515524 -0.315358 0.189853 -0.589153 -0.264368 -0.267752 -0.195456 0.069763 0.028519 0
13 -0.798804 -0.117919 -0.357717 0.064237 -1.152914 -0.559676 -0.651647 -0.039909 -0.177030 0.500161 -0.273613 0.258679 0.790680 0
14 -0.078167 -0.317301 0.539934 0.021348 -0.238394 -1.228167 -0.600813 0.330424 -0.715325 0.747390 -0.474837 -0.755240 -0.089072 0
15 0.239636 0.763042 0.457000 0.194748 -0.448390 -1.142082 -1.844178 0.710890 -0.411874 -0.048000 0.959817 1.034801 -0.488467 0
16 0.593005 -0.028653 0.182359 -0.006019 0.143398 -0.749865 0.313923 0.942236 0.207952 -0.901225 -0.147923 0.552419 -1.176684 0
17 -0.565089 -0.734146 0.115715 0.669649 0.299612 -0.880228 0.070142 -0.683059 -0.693274 0.872570 1.622547 -0.210766 0.369132 0
18 -0.286584 -1.165487 -0.511773 0.317715 0.517783 -1.379822 0.371120 0.010478 -0.697242 0.652545 -0.084622 -0.479999 1.158310 0
19 -0.493038 -0.532596 0.330444 0.672695 0.252734 -0.921397 -1.266250 0.422426 1.086459 2.009229 2.043563 0.507131 0.412127 0
20 1.367128 -0.497386 -0.060729 0.760778 -1.140772 0.151619 -1.896890 -3.328933 -1.164662 -1.274380 0.647583 1.589549 0.963561 0
21 0.073944 0.076483 0.128736 0.807417 -1.111030 -0.134221 -1.151001 -1.658868 -1.130705 -0.105905 -0.047689 -0.710169 1.125641 0
22 0.660587 -0.157368 -0.327968 0.214143 -1.218601 -0.164400 -1.547244 -1.835812 -1.428252 -0.025563 0.297802 -1.007616 0.493018 0
23 0.629501 0.686317 0.608046 -0.676736 -0.888682 0.627145 -1.099783 0.193809 -0.138455 -2.486797 -0.846714 -2.218162 -0.746468 0
24 -0.898063 2.104109 0.850318 1.640696 0.809305 1.055106 -0.219443 0.333777 -0.393837 0.524945 0.726660 -1.038018 0.706289 0
25 -1.139909 1.564949 0.224228 1.291732 0.521761 0.566571 -0.502128 0.174052 -0.668503 0.525822 0.608177 -0.929238 0.913787 0
26 -0.670112 0.400455 0.542697 0.702795 -0.312340 0.610331 0.039142 0.210504 -0.147396 0.558589 -0.037638 -1.606275 -0.202252 0
27 -0.698001 -0.051148 0.596315 -0.686050 -0.524681 0.775281 -0.431256 0.459523 0.649406 -1.709659 0.820167 -1.716683 -0.365401 0
28 0.267840 0.993382 -0.731513 -0.033601 -0.525393 0.229122 -0.296022 1.310078 0.644052 -1.340101 1.103501 0.466996 0.540105 0
29 -1.497035 -1.682687 -2.317594 -1.006384 -1.191688 -1.739630 -0.583086 0.367213 0.619588 -0.474930 -0.553418 -0.278936 -0.651289 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
285 0.054160 0.170817 0.428157 -0.173562 0.450572 0.340002 0.490456 0.213782 -0.773382 -0.563043 0.363713 -0.075376 0.254277 1
286 -0.837236 -0.153388 0.268665 1.306860 1.166541 1.445964 1.199082 -0.371965 -0.308775 -0.231117 0.135108 -0.309420 -0.311043 1
287 0.054272 0.092607 -0.112145 0.149520 -0.877907 0.167855 0.850451 0.678919 -0.080733 -0.661900 -0.257044 -0.389926 -1.512126 1
288 1.304506 -0.939417 -1.006008 1.020838 -1.215004 0.813775 1.376113 -1.476023 -2.020203 0.337259 2.404807 -1.045089 0.294137 1
289 -0.159584 -0.189562 -0.163770 0.245326 -0.612664 0.540468 0.129258 -1.140293 -1.534282 -0.446273 1.154468 0.496372 -0.844209 1
290 -0.506257 -0.132656 -0.153020 -0.443656 -1.058433 -1.070196 -1.502266 -0.228195 0.011621 0.395647 -0.043527 -0.074259 -0.944997 1
291 0.434058 0.398883 0.528841 1.249494 -0.183209 0.484735 0.107864 -0.036215 0.158756 0.063774 -0.015594 0.089898 -0.386434 1
292 0.539818 0.967349 0.496940 0.788583 0.455748 -0.003488 0.155054 0.250439 0.033537 -0.103080 0.763456 0.120334 -0.561083 1
293 0.273525 -0.141184 0.986558 0.184194 -1.414155 -0.887289 1.438143 0.760252 -0.135023 2.252775 1.900066 -0.624009 1.138131 1
294 1.757881 -0.657600 -0.123573 1.713741 0.702842 1.053753 0.625178 -0.475339 -0.351226 0.392355 0.055097 -0.855496 0.261994 1
295 1.094424 -0.403254 -0.422590 1.461920 0.799118 0.835687 0.944428 -0.017134 -0.567998 0.548997 -0.402101 -1.351402 0.844210 1
296 0.456168 0.097137 0.082277 0.474571 -0.507174 -0.722248 -1.600889 -0.346317 0.125499 -0.987583 -0.902738 -0.334857 0.502328 1
297 0.158964 1.272002 -0.348555 -0.114827 -0.295698 0.912253 -0.330632 0.873334 -0.284744 -1.295494 -0.870216 0.332550 1.474750 1
298 -0.259693 1.853691 0.322474 0.273480 -0.345550 0.076148 -0.807223 1.185136 0.497571 0.246526 0.604499 0.325991 0.648890 1
299 -2.384686 -0.883216 0.899645 1.092020 -0.445358 0.965983 0.519488 0.327030 0.842924 -0.068183 -0.353463 0.674622 0.616154 1
300 -1.592394 -0.161493 1.268663 1.545345 -0.785313 1.028442 0.029964 0.687974 0.585005 -0.271009 -0.309379 0.500772 0.919246 1
301 -1.609403 -0.213622 1.375684 1.572829 -0.631037 1.172929 0.158255 0.764985 0.633586 -0.171438 -0.369442 0.503642 1.007372 1
302 1.636269 0.260324 0.678085 -0.587416 -0.746366 -1.079248 -0.077932 0.638303 0.636743 0.407116 -0.026192 -0.277632 -0.299649 1
303 -0.468490 1.262367 1.213487 0.908453 0.995514 0.311472 0.225458 -0.034264 -0.759979 -0.553620 -0.608785 0.868899 0.308641 1
304 0.142289 1.055504 -0.095146 0.415827 0.418496 0.116729 0.275183 0.600879 0.346584 0.250517 0.332107 0.369426 -0.246906 1
305 1.335436 0.402811 -1.381643 -0.110506 0.678178 1.421056 -0.283357 0.434156 1.225881 1.955751 0.309648 -0.785677 -2.348680 1
306 3.500613 -0.323410 -0.934394 -3.041308 1.822358 3.047378 1.485650 2.669335 2.613811 1.719411 -1.458878 -5.545815 -3.654033 1
307 3.518855 -0.474530 -0.626225 -2.369853 1.940903 2.966850 1.650342 2.153435 2.683362 2.298387 -0.752895 -5.262247 -4.609324 1
308 -0.475987 0.385400 0.283385 0.721130 0.390121 0.767408 0.105529 1.024463 1.051642 0.737865 1.172001 0.939039 0.996757 1
309 1.579671 1.097062 0.916778 0.242411 1.350012 1.924193 1.269541 0.800714 0.639968 1.385723 -1.326387 0.948539 2.215496 1
310 -1.373143 0.545169 2.242534 0.151875 0.131935 1.422049 -0.049814 0.337962 1.705104 1.020922 1.432972 1.506929 0.326875 1
311 -0.555796 0.192435 -1.486600 -1.844351 -0.024486 1.541478 2.218745 1.405936 0.718192 -0.125397 -0.557874 -0.139931 1.048558 1
312 -0.017409 0.098378 -0.037330 0.351157 -0.857030 -1.081681 0.404292 1.175376 0.863678 1.257200 1.194305 1.918621 0.765312 1
313 -0.318014 0.774156 -0.322540 -0.221254 -0.285732 0.116133 0.130191 -0.130453 -0.192462 0.885042 1.494832 0.568910 -0.113303 1
314 -1.382512 0.949410 -0.199056 0.190953 -1.174835 -0.995885 0.048886 -0.304819 -1.529189 -1.023882 -1.763085 -0.975472 -0.705853 1

315 rows × 14 columns

In [68]:
# Elbow method: within-cluster sum of squares (inertia) for k = 1..14.
WSSs = []
for i in range(1, 15):
    # n_init=10 made explicit for consistency with the other KMeans cells
    # in this notebook (it equals the sklearn default, so results are
    # unchanged).
    km = KMeans(n_clusters=i, random_state=0, n_init=10)
    km.fit(X)
    WSSs.append(km.inertia_)
WSSs
Out[68]:
[4095.0,
 3677.5272636488644,
 3305.67969308587,
 3121.4860763676925,
 2968.345101601044,
 2857.7937836988276,
 2716.9450438941003,
 2608.9364022273085,
 2516.5224429226737,
 2419.1503973007284,
 2368.727763755893,
 2356.6118214758935,
 2295.7977011967973,
 2209.1952143690924]
In [69]:
# Elbow plot: look for the "knee" where inertia stops dropping sharply.
# Added title and axis labels so the figure stands on its own.
plt.figure(figsize=(12, 12))
plt.plot(range(1, 15), WSSs)
plt.title('Elbow method')
plt.xlabel('k (number of clusters)')
plt.ylabel('Within-cluster sum of squares')
Out[69]:
[<matplotlib.lines.Line2D at 0x1b8226f8278>]
In [78]:
# Mean silhouette score for each candidate number of clusters (2..14);
# higher is better.
avers = []
for n_clusters in range(2, 15):
    modelo = KMeans(n_clusters=n_clusters, random_state=0, n_init=10)
    modelo.fit(X)
    etiquetas = modelo.labels_
    avers.append(silhouette_score(X, etiquetas))
avers
Out[78]:
[0.1334599578746985,
 0.10421908702341012,
 0.09891517752652676,
 0.10149681615751224,
 0.0927079455677389,
 0.07588598939974744,
 0.08357922084658045,
 0.07482765886529266,
 0.08621245822232701,
 0.07987632421011658,
 0.08304819187882027,
 0.0699057551321508,
 0.0919264952801006]
In [79]:
# Plot the mean silhouette score against the number of clusters.
ks = range(2, 15)
plt.figure(figsize=(6, 6))
plt.plot(ks, avers)
Out[79]:
[<matplotlib.lines.Line2D at 0x21d2ab4d898>]
In [114]:
k=2
X_in = X.iloc[:,:-1]
kmeans = KMeans(n_clusters=k, random_state=0, n_init=10)
kmeans.fit(X_in)
y_clusters = kmeans.labels_
cluster_labels = np.unique(y_clusters)
print(Counter(y_clusters))

silueta_puntos= silhouette_samples(X_in, y_clusters, metric='euclidean')

y_ax_lower, y_ax_upper = 0, 0
yticks = []
colores = ['r', 'g', 'b', 'y', 'o']
for i, c in enumerate(cluster_labels):
    silueta_puntos_c = silueta_puntos[y_clusters == c]
    silueta_puntos_c.sort()
    y_ax_upper += len(silueta_puntos_c)
    color = colores[i]
    plt.barh(range(y_ax_lower, y_ax_upper), silueta_puntos_c, height=1.0, 
             edgecolor='none', color=color)

    yticks.append((y_ax_lower + y_ax_upper) / 2.)
    y_ax_lower += len(silueta_puntos_c)
    
silueta_promedio = np.mean(silueta_puntos)
plt.axvline(silueta_promedio, color="black", linestyle="--") 

plt.yticks(yticks, cluster_labels + 1)
plt.ylabel('Cluster')
plt.xlabel('Coeficiente de silueta')

plt.tight_layout()
# plt.savefig('./figures/silhouette.png', dpi=300)
plt.show()

X_in['Cluster'] = y_clusters
X_in['chosen'] = X['chosen']

stacked = X_in.groupby(['chosen','Cluster']).size().reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(9,9))
Counter({0: 214, 1: 101})
Out[114]:
<matplotlib.axes._subplots.AxesSubplot at 0x21d2a9aeda0>
In [116]:
# Silhouette analysis for k=3. A first fit reveals a tiny outlier cluster
# (label 2, 6 tracks — duplicated/atypical songs); those rows are dropped
# and KMeans is re-fit on the cleaned data.
k = 3
X_in = X.iloc[:, :-1]  # drop the trailing 'chosen' column before clustering
print(X_in.shape)
kmeans = KMeans(n_clusters=k, random_state=0, n_init=10)
kmeans.fit(X_in)
y_clusters = kmeans.labels_
cluster_labels = np.unique(y_clusters)
print(Counter(y_clusters))

# Inspect the members of the tiny cluster, then remove them from X.
# NOTE(review): drop() takes index *labels*; positions from np.where match
# labels only while X still has its original RangeIndex, so re-running this
# cell would drop the wrong rows. Consider X.reset_index(drop=True) after.
print(df_n_ps[0][y_clusters==2][['artist','song']])
print(np.where(y_clusters==2)[0])
X = X.drop(np.where(y_clusters==2)[0], axis=0)

# Re-fit on the cleaned data.
X_in = X.iloc[:, :-1]
print(X_in.shape)
kmeans = KMeans(n_clusters=k, random_state=0, n_init=10)
kmeans.fit(X_in)
y_clusters = kmeans.labels_
cluster_labels = np.unique(y_clusters)
print(Counter(y_clusters))

silueta_puntos= silhouette_samples(X_in, y_clusters, metric='euclidean')

y_ax_lower, y_ax_upper = 0, 0
yticks = []
# BUG FIX: the original list contained 'o', which is a matplotlib *marker*,
# not a color — plt.barh(color='o') would raise for >= 5 clusters.
# Replaced with the valid named color 'orange'.
colores = ['r', 'g', 'b', 'y', 'orange']
for i, c in enumerate(cluster_labels):
    # Sorted silhouette values of cluster c, drawn as one horizontal band.
    silueta_puntos_c = silueta_puntos[y_clusters == c]
    silueta_puntos_c.sort()
    y_ax_upper += len(silueta_puntos_c)
    color = colores[i]
    plt.barh(range(y_ax_lower, y_ax_upper), silueta_puntos_c, height=1.0,
             edgecolor='none', color=color)

    yticks.append((y_ax_lower + y_ax_upper) / 2.)
    y_ax_lower += len(silueta_puntos_c)

# Dashed line marks the average silhouette coefficient over all samples.
silueta_promedio = np.mean(silueta_puntos)
plt.axvline(silueta_promedio, color="black", linestyle="--")

plt.yticks(yticks, cluster_labels + 1)  # show clusters 1-based on the axis
plt.ylabel('Cluster')
plt.xlabel('Coeficiente de silueta')

plt.tight_layout()
# plt.savefig('./figures/silhouette.png', dpi=300)
plt.show()

# Compare cluster assignment against the original 'chosen' label.
X_in['Cluster'] = y_clusters
X_in['chosen'] = X['chosen']

stacked = X_in.groupby(['chosen','Cluster']).size().reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(7,7))
(315, 13)
Counter({1: 158, 0: 151, 2: 6})
                     artist                                               song
173   Lil Hardin Armstrong                        Oriental Swing (02-2-38).mp3
174   Lil Hardin Armstrong                        Oriental Swing (02-2-38).mp3
175   Lil Hardin Armstrong                        Oriental Swing (02-2-38).mp3
151             Jamiroquai    Cloud 9.mp3                                  ...
121          Satin Jackets    Take It From Me.mp3                          ...
120          Satin Jackets    Take It From Me.mp3                          ...
[173 174 175 232 306 307]
(309, 13)
Counter({1: 132, 0: 105, 2: 72})
Out[116]:
<matplotlib.axes._subplots.AxesSubplot at 0x21d2aa67320>
In [115]:
# --- K-means with k=4 + silhouette diagnostic + chosen/cluster bar chart ---
k=4
# FIX: explicit copy of the feature slice so the column assignments at the
# bottom of the cell don't trigger SettingWithCopyWarning.
X_in = X.iloc[:,:-1].copy()
print(X_in.shape)
kmeans = KMeans(n_clusters=k, random_state=0, n_init=10)
kmeans.fit(X_in)
y_clusters = kmeans.labels_
cluster_labels = np.unique(y_clusters)

print(Counter(y_clusters))  # cluster sizes
# Per-sample silhouette coefficients (euclidean metric).
silueta_puntos = silhouette_samples(X_in, y_clusters, metric='euclidean')

y_ax_lower, y_ax_upper = 0, 0
yticks = []
# FIX: 'o' is a matplotlib marker, not a color — it would raise ValueError
# for a 5th cluster; use the valid named color 'orange'.
colores = ['r', 'g', 'b', 'y', 'orange']
for i, c in enumerate(cluster_labels):
    silueta_puntos_c = silueta_puntos[y_clusters == c]
    silueta_puntos_c.sort()
    y_ax_upper += len(silueta_puntos_c)
    color = colores[i]
    plt.barh(range(y_ax_lower, y_ax_upper), silueta_puntos_c, height=1.0, 
             edgecolor='none', color=color)

    yticks.append((y_ax_lower + y_ax_upper) / 2.)  # band midpoint for the tick
    y_ax_lower += len(silueta_puntos_c)
    
silueta_promedio = np.mean(silueta_puntos)
plt.axvline(silueta_promedio, color="black", linestyle="--") 

plt.yticks(yticks, cluster_labels + 1)
plt.ylabel('Cluster')
plt.xlabel('Coeficiente de silueta')

plt.tight_layout()
# plt.savefig('./figures/silhouette.png', dpi=300)
plt.show()


# Stacked bar chart of chosen vs. not-chosen counts per cluster.
X_in['Cluster'] = y_clusters
X_in['chosen'] = X['chosen']

stacked = X_in.groupby(['chosen','Cluster']).size().reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(9,9))
(315, 13)
Counter({0: 138, 2: 121, 1: 50, 3: 6})
In [117]:
# --- K-means with k=5 + silhouette diagnostic + chosen/cluster bar chart ---
k=5
X_in = X.iloc[:,:-1]
kmeans = KMeans(n_clusters=k, random_state=0, n_init=10)
kmeans.fit(X_in)
y_clusters = kmeans.labels_
cluster_labels = np.unique(y_clusters)
print(Counter(y_clusters))


# Silhouette coefficient of every sample under this clustering.
silueta_puntos = silhouette_samples(X_in, y_clusters, metric='euclidean')

# One horizontal band of sorted silhouette bars per cluster.
colores = ['red', 'g', 'b', 'y', 'darkorange']
yticks = []
lower = 0
for idx, label in enumerate(cluster_labels):
    band = np.sort(silueta_puntos[y_clusters == label])
    upper = lower + len(band)
    plt.barh(range(lower, upper), band, height=1.0,
             edgecolor='none', color=colores[idx])
    yticks.append((lower + upper) / 2.)  # tick at the band midpoint
    lower = upper

# Mean silhouette as a dashed reference line.
silueta_promedio = np.mean(silueta_puntos)
plt.axvline(silueta_promedio, color="black", linestyle="--")

plt.yticks(yticks, cluster_labels + 1)  # 1-based cluster ids on the axis
plt.ylabel('Cluster')
plt.xlabel('Coeficiente de silueta')

plt.tight_layout()
plt.show()


# Count chosen vs. not-chosen tracks within each cluster and plot stacked.
X_in['Cluster'] = y_clusters
X_in['chosen'] = X['chosen']

stacked = X_in.groupby(['chosen','Cluster']).size().reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(9,9))
Counter({3: 96, 4: 78, 1: 61, 2: 44, 0: 30})
Out[117]:
<matplotlib.axes._subplots.AxesSubplot at 0x21d2b1e94e0>

Chosen number of clusters: K = 3 (based on the silhouette plots above).

In [70]:
# Fit the final model with the chosen K=3 on the MFCC feature frame X.
# NOTE(review): assumes X holds only the 13 MFCC columns at this point
# (the 'Cluster'/'chosen' columns are appended two cells below) — confirm.
kmeans_mfcc = KMeans(n_clusters=3, random_state=0, n_init=10)
kmeans_mfcc.fit(X)
Out[70]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=3, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [71]:
kmeans_mfcc.labels_
Out[71]:
array([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,
       0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0,
       1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0,
       1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0,
       0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2,
       1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 2, 0, 0, 0, 1, 0, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1,
       0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1,
       1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 2, 2,
       1, 1, 1, 1, 1, 1, 0])
In [72]:
# predict() on the same data the model was fit on — identical to
# kmeans_mfcc.labels_ (compare Out[71] and Out[72]); kept as an explicit
# named variable for the column assignment below.
clusters_mfcc = kmeans_mfcc.predict(X)
clusters_mfcc
Out[72]:
array([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,
       0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0,
       1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0,
       1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0,
       0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2,
       1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 2, 0, 0, 0, 1, 0, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1,
       0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1,
       1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 2, 2,
       1, 1, 1, 1, 1, 1, 0])
In [73]:
# Append the cluster id and the ground-truth 'chosen' flag as new columns.
# NOTE(review): this mutates the notebook-global X in place — earlier cells
# that expect a features-only X will see 15 columns after this runs.
X.loc[:,'Cluster'] = clusters_mfcc
X.loc[:,'chosen'] = list(y)
In [74]:
X
Out[74]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13 Cluster chosen
0 0.297583 1.225637 -0.367641 0.606499 0.072373 -2.029620 0.791469 0.752018 2.268802 -1.383289 0.548279 1.903211 -1.011470 1 0
1 0.637676 -1.507256 -1.572737 -0.954161 -0.857425 0.327005 0.816764 0.214245 0.241703 0.637066 1.601538 0.300317 -0.466779 0 0
2 2.236730 -0.319414 0.669910 -1.918119 -0.820882 -2.379333 -1.570021 -2.755344 -2.150610 -2.528577 -0.877081 -0.522248 -1.429911 0 0
3 0.662077 -0.381499 0.111981 -1.743808 -1.317593 -1.348534 -0.627198 -1.629882 -2.075974 -1.248765 -1.126014 -1.316359 -1.126174 0 0
4 0.736502 0.112932 -0.065024 -1.049458 -0.408043 -0.437499 0.090831 -0.852983 -1.922491 -0.284365 0.210624 -0.032122 -0.700183 0 0
5 2.044945 -1.519304 -0.247242 -1.575018 -3.066163 -3.318978 -4.228656 -1.698572 -1.328177 -1.585685 -2.068965 -1.981423 -2.570963 0 0
6 -0.163510 0.463882 0.433775 0.607952 -1.213428 0.398472 0.549864 -0.956001 -2.046092 0.848843 0.361883 -0.369335 0.023466 0 0
7 0.469947 -0.206445 -0.861641 -1.194743 -1.342011 -0.537321 -0.618801 0.066311 1.216468 2.424672 0.407859 -0.971317 -0.885313 0 0
8 0.884810 -2.954758 0.310482 -0.346234 -0.339963 -0.215837 -0.238824 -0.154440 -0.038298 0.157431 -0.417738 -0.346934 -0.136933 0 0
9 -0.966889 1.065377 -0.166673 -0.580533 0.526040 0.508878 1.073663 0.967893 0.083588 0.138440 -0.514643 0.318418 0.368555 1 0
10 0.322674 -1.252898 -1.332613 -0.629526 -1.062802 -0.073310 -0.754358 -0.444217 -0.260065 0.586226 -0.084312 0.912315 0.392903 0 0
11 -0.198884 0.112823 0.240285 0.472717 -0.142430 0.364603 0.036949 0.383315 0.567300 -0.213906 -0.666284 0.251886 0.001530 1 0
12 -0.704344 0.127023 0.716665 -0.403523 -0.515524 -0.315358 0.189853 -0.589153 -0.264368 -0.267752 -0.195456 0.069763 0.028519 0 0
13 -0.798804 -0.117919 -0.357717 0.064237 -1.152914 -0.559676 -0.651647 -0.039909 -0.177030 0.500161 -0.273613 0.258679 0.790680 0 0
14 -0.078167 -0.317301 0.539934 0.021348 -0.238394 -1.228167 -0.600813 0.330424 -0.715325 0.747390 -0.474837 -0.755240 -0.089072 0 0
15 0.239636 0.763042 0.457000 0.194748 -0.448390 -1.142082 -1.844178 0.710890 -0.411874 -0.048000 0.959817 1.034801 -0.488467 0 0
16 0.593005 -0.028653 0.182359 -0.006019 0.143398 -0.749865 0.313923 0.942236 0.207952 -0.901225 -0.147923 0.552419 -1.176684 0 0
17 -0.565089 -0.734146 0.115715 0.669649 0.299612 -0.880228 0.070142 -0.683059 -0.693274 0.872570 1.622547 -0.210766 0.369132 0 0
18 -0.286584 -1.165487 -0.511773 0.317715 0.517783 -1.379822 0.371120 0.010478 -0.697242 0.652545 -0.084622 -0.479999 1.158310 0 0
19 -0.493038 -0.532596 0.330444 0.672695 0.252734 -0.921397 -1.266250 0.422426 1.086459 2.009229 2.043563 0.507131 0.412127 1 0
20 1.367128 -0.497386 -0.060729 0.760778 -1.140772 0.151619 -1.896890 -3.328933 -1.164662 -1.274380 0.647583 1.589549 0.963561 0 0
21 0.073944 0.076483 0.128736 0.807417 -1.111030 -0.134221 -1.151001 -1.658868 -1.130705 -0.105905 -0.047689 -0.710169 1.125641 0 0
22 0.660587 -0.157368 -0.327968 0.214143 -1.218601 -0.164400 -1.547244 -1.835812 -1.428252 -0.025563 0.297802 -1.007616 0.493018 0 0
23 0.629501 0.686317 0.608046 -0.676736 -0.888682 0.627145 -1.099783 0.193809 -0.138455 -2.486797 -0.846714 -2.218162 -0.746468 0 0
24 -0.898063 2.104109 0.850318 1.640696 0.809305 1.055106 -0.219443 0.333777 -0.393837 0.524945 0.726660 -1.038018 0.706289 1 0
25 -1.139909 1.564949 0.224228 1.291732 0.521761 0.566571 -0.502128 0.174052 -0.668503 0.525822 0.608177 -0.929238 0.913787 1 0
26 -0.670112 0.400455 0.542697 0.702795 -0.312340 0.610331 0.039142 0.210504 -0.147396 0.558589 -0.037638 -1.606275 -0.202252 1 0
27 -0.698001 -0.051148 0.596315 -0.686050 -0.524681 0.775281 -0.431256 0.459523 0.649406 -1.709659 0.820167 -1.716683 -0.365401 0 0
28 0.267840 0.993382 -0.731513 -0.033601 -0.525393 0.229122 -0.296022 1.310078 0.644052 -1.340101 1.103501 0.466996 0.540105 1 0
29 -1.497035 -1.682687 -2.317594 -1.006384 -1.191688 -1.739630 -0.583086 0.367213 0.619588 -0.474930 -0.553418 -0.278936 -0.651289 0 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
285 0.054160 0.170817 0.428157 -0.173562 0.450572 0.340002 0.490456 0.213782 -0.773382 -0.563043 0.363713 -0.075376 0.254277 1 1
286 -0.837236 -0.153388 0.268665 1.306860 1.166541 1.445964 1.199082 -0.371965 -0.308775 -0.231117 0.135108 -0.309420 -0.311043 1 1
287 0.054272 0.092607 -0.112145 0.149520 -0.877907 0.167855 0.850451 0.678919 -0.080733 -0.661900 -0.257044 -0.389926 -1.512126 0 1
288 1.304506 -0.939417 -1.006008 1.020838 -1.215004 0.813775 1.376113 -1.476023 -2.020203 0.337259 2.404807 -1.045089 0.294137 0 1
289 -0.159584 -0.189562 -0.163770 0.245326 -0.612664 0.540468 0.129258 -1.140293 -1.534282 -0.446273 1.154468 0.496372 -0.844209 0 1
290 -0.506257 -0.132656 -0.153020 -0.443656 -1.058433 -1.070196 -1.502266 -0.228195 0.011621 0.395647 -0.043527 -0.074259 -0.944997 0 1
291 0.434058 0.398883 0.528841 1.249494 -0.183209 0.484735 0.107864 -0.036215 0.158756 0.063774 -0.015594 0.089898 -0.386434 1 1
292 0.539818 0.967349 0.496940 0.788583 0.455748 -0.003488 0.155054 0.250439 0.033537 -0.103080 0.763456 0.120334 -0.561083 1 1
293 0.273525 -0.141184 0.986558 0.184194 -1.414155 -0.887289 1.438143 0.760252 -0.135023 2.252775 1.900066 -0.624009 1.138131 1 1
294 1.757881 -0.657600 -0.123573 1.713741 0.702842 1.053753 0.625178 -0.475339 -0.351226 0.392355 0.055097 -0.855496 0.261994 1 1
295 1.094424 -0.403254 -0.422590 1.461920 0.799118 0.835687 0.944428 -0.017134 -0.567998 0.548997 -0.402101 -1.351402 0.844210 1 1
296 0.456168 0.097137 0.082277 0.474571 -0.507174 -0.722248 -1.600889 -0.346317 0.125499 -0.987583 -0.902738 -0.334857 0.502328 0 1
297 0.158964 1.272002 -0.348555 -0.114827 -0.295698 0.912253 -0.330632 0.873334 -0.284744 -1.295494 -0.870216 0.332550 1.474750 1 1
298 -0.259693 1.853691 0.322474 0.273480 -0.345550 0.076148 -0.807223 1.185136 0.497571 0.246526 0.604499 0.325991 0.648890 1 1
299 -2.384686 -0.883216 0.899645 1.092020 -0.445358 0.965983 0.519488 0.327030 0.842924 -0.068183 -0.353463 0.674622 0.616154 1 1
300 -1.592394 -0.161493 1.268663 1.545345 -0.785313 1.028442 0.029964 0.687974 0.585005 -0.271009 -0.309379 0.500772 0.919246 1 1
301 -1.609403 -0.213622 1.375684 1.572829 -0.631037 1.172929 0.158255 0.764985 0.633586 -0.171438 -0.369442 0.503642 1.007372 1 1
302 1.636269 0.260324 0.678085 -0.587416 -0.746366 -1.079248 -0.077932 0.638303 0.636743 0.407116 -0.026192 -0.277632 -0.299649 0 1
303 -0.468490 1.262367 1.213487 0.908453 0.995514 0.311472 0.225458 -0.034264 -0.759979 -0.553620 -0.608785 0.868899 0.308641 1 1
304 0.142289 1.055504 -0.095146 0.415827 0.418496 0.116729 0.275183 0.600879 0.346584 0.250517 0.332107 0.369426 -0.246906 1 1
305 1.335436 0.402811 -1.381643 -0.110506 0.678178 1.421056 -0.283357 0.434156 1.225881 1.955751 0.309648 -0.785677 -2.348680 1 1
306 3.500613 -0.323410 -0.934394 -3.041308 1.822358 3.047378 1.485650 2.669335 2.613811 1.719411 -1.458878 -5.545815 -3.654033 2 1
307 3.518855 -0.474530 -0.626225 -2.369853 1.940903 2.966850 1.650342 2.153435 2.683362 2.298387 -0.752895 -5.262247 -4.609324 2 1
308 -0.475987 0.385400 0.283385 0.721130 0.390121 0.767408 0.105529 1.024463 1.051642 0.737865 1.172001 0.939039 0.996757 1 1
309 1.579671 1.097062 0.916778 0.242411 1.350012 1.924193 1.269541 0.800714 0.639968 1.385723 -1.326387 0.948539 2.215496 1 1
310 -1.373143 0.545169 2.242534 0.151875 0.131935 1.422049 -0.049814 0.337962 1.705104 1.020922 1.432972 1.506929 0.326875 1 1
311 -0.555796 0.192435 -1.486600 -1.844351 -0.024486 1.541478 2.218745 1.405936 0.718192 -0.125397 -0.557874 -0.139931 1.048558 1 1
312 -0.017409 0.098378 -0.037330 0.351157 -0.857030 -1.081681 0.404292 1.175376 0.863678 1.257200 1.194305 1.918621 0.765312 1 1
313 -0.318014 0.774156 -0.322540 -0.221254 -0.285732 0.116133 0.130191 -0.130453 -0.192462 0.885042 1.494832 0.568910 -0.113303 1 1
314 -1.382512 0.949410 -0.199056 0.190953 -1.174835 -0.995885 0.048886 -0.304819 -1.529189 -1.023882 -1.763085 -0.975472 -0.705853 0 1

315 rows × 15 columns

In [75]:
# Count tracks per (chosen, Cluster) pair, pivot clusters to rows, and draw
# a stacked bar chart of not-chosen (0) vs. chosen (1) within each cluster.
counts = X.groupby(['chosen','Cluster']).size()
stacked = counts.reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(10,7))
Out[75]:
<matplotlib.axes._subplots.AxesSubplot at 0x1b8227479b0>
In [76]:
# Render a markdown section header with the next company's name.
# NOTE(review): imports belong in the notebook's top import cell; left here
# unchanged.
from IPython.display import display, Markdown, Latex
display(Markdown('## '+companies[1]))

Club De Banqueros y Empresarios

ANN (Artificial Neural Network)

In [41]:
X = df_n_ps_std_mfcc[1]
In [42]:
y = df_n_ps[1]['chosen']
In [43]:
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [44]:
X_train.shape
Out[44]:
(191, 13)
In [45]:
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [46]:
# Candidate values for the MLP hyperparameter grid search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
# Defined but excluded from the grid (see the commented-out entry below)
# to keep the search tractable.
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [47]:
import time
start = time.time() # current time in seconds since the epoch (Jan 1, 1970) — used to time the search

np.random.seed(1234)
# Hyperparameter grid for the MLP; batch_size deliberately left out.
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track both Cohen's kappa and accuracy; refit the best model on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): `iid` was deprecated in scikit-learn 0.22 and removed in
# 0.24 — this call only works on an older sklearn.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [48]:
# Run the 5-fold grid search. This was interrupted by hand (see the
# KeyboardInterrupt traceback below); the best parameters from an earlier
# completed run are hardcoded in the next cell.
grid.fit(X_train, y_train)

print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # time after the search finished
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-48-eff0423a2927> in <module>
----> 1 grid.fit(X_train, y_train)
      2 
      3 print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
      4     grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
      5 end = time.time() # Tiempo después de finalizar el entrenamiento del modelo

C:\ProgramData\Anaconda3\lib\site-packages\sklearn\model_selection\_search.py in fit(self, X, y, groups, **fit_params)
    638                                   error_score=self.error_score)
    639           for parameters, (train, test) in product(candidate_params,
--> 640                                                    cv.split(X, y, groups)))
    641 
    642         # if one choose to see train score, "out" will contain train score info

C:\ProgramData\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in __call__(self, iterable)
    787                 # consumption.
    788                 self._iterating = False
--> 789             self.retrieve()
    790             # Make sure that we get a last message telling us we are done
    791             elapsed_time = time.time() - self._start_time

C:\ProgramData\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in retrieve(self)
    697             try:
    698                 if getattr(self._backend, 'supports_timeout', False):
--> 699                     self._output.extend(job.get(timeout=self.timeout))
    700                 else:
    701                     self._output.extend(job.get())

C:\ProgramData\Anaconda3\lib\multiprocessing\pool.py in get(self, timeout)
    649 
    650     def get(self, timeout=None):
--> 651         self.wait(timeout)
    652         if not self.ready():
    653             raise TimeoutError

C:\ProgramData\Anaconda3\lib\multiprocessing\pool.py in wait(self, timeout)
    646 
    647     def wait(self, timeout=None):
--> 648         self._event.wait(timeout)
    649 
    650     def get(self, timeout=None):

C:\ProgramData\Anaconda3\lib\threading.py in wait(self, timeout)
    550             signaled = self._flag
    551             if not signaled:
--> 552                 signaled = self._cond.wait(timeout)
    553             return signaled
    554 

C:\ProgramData\Anaconda3\lib\threading.py in wait(self, timeout)
    294         try:    # restore state no matter what (e.g., KeyboardInterrupt)
    295             if timeout is None:
--> 296                 waiter.acquire()
    297                 gotit = True
    298             else:

KeyboardInterrupt: 
In [49]:
# The grid search above was interrupted; hardcode the best parameters found
# in a previous completed run so the rest of the notebook is reproducible.
grid.best_params_={'activation': 'relu', 'hidden_layer_sizes': (20, 20, 20), 'learning_rate_init': 0.009, 'max_iter': 300}
n0=X_train.shape[1]  # input dimensionality (13 MFCC features)
### hidden_layer_sizes
# Layer widths for the Keras model: the tuned hidden sizes plus one output
# unit. (Idiom fix: list() conversion replaces the manual index loop.)
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)
lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [50]:
input_tensor = Input(shape = (n0,))
In [51]:
# Stack the tuned hidden layers on top of the input tensor; hidden_outputs
# keeps every intermediate tensor (input first, deepest hidden layer last).
hidden_outputs = [input_tensor]
for width in ns[:-1]:
    layer = Dense(width, activation = grid.best_params_['activation'])
    hidden_outputs.append(layer(hidden_outputs[-1]))

# Single sigmoid unit (ns[-1] == 1) for binary classification.
classification_output = Dense(ns[-1], activation = 'sigmoid')(hidden_outputs[-1])
In [52]:
# Assemble the model and snapshot its freshly initialized weights so the
# training cell below can reset to this exact starting point on re-runs.
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [53]:
model.summary()
Model: "model_2"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_3 (InputLayer)         (None, 13)                0         
_________________________________________________________________
dense_4 (Dense)              (None, 20)                280       
_________________________________________________________________
dense_5 (Dense)              (None, 20)                420       
_________________________________________________________________
dense_6 (Dense)              (None, 20)                420       
_________________________________________________________________
dense_7 (Dense)              (None, 1)                 21        
=================================================================
Total params: 1,141
Trainable params: 1,141
Non-trainable params: 0
_________________________________________________________________
In [54]:
# Reset to the saved initial weights (so re-running this cell restarts
# training from the same point), compile with Adam at the tuned learning
# rate, and train with an LR-reduction-on-plateau callback.
model.set_weights(weights)
adam = keras.optimizers.Adam(lr=lr)  # NOTE(review): `lr` is the legacy kwarg; newer Keras uses `learning_rate`
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Halve the LR when val_accuracy fails to improve by >= 0.01 for 10 epochs.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), batch_size= 32,
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 191 samples, validate on 64 samples
Epoch 1/300
191/191 [==============================] - 0s 2ms/step - loss: 0.6708 - accuracy: 0.6754 - val_loss: 0.5829 - val_accuracy: 0.6875
Epoch 2/300
191/191 [==============================] - 0s 78us/step - loss: 0.5742 - accuracy: 0.7277 - val_loss: 0.5611 - val_accuracy: 0.6875
Epoch 3/300
191/191 [==============================] - 0s 68us/step - loss: 0.5326 - accuracy: 0.7277 - val_loss: 0.5542 - val_accuracy: 0.6875
Epoch 4/300
191/191 [==============================] - 0s 58us/step - loss: 0.4963 - accuracy: 0.7277 - val_loss: 0.5663 - val_accuracy: 0.6875
Epoch 5/300
191/191 [==============================] - 0s 63us/step - loss: 0.4595 - accuracy: 0.7277 - val_loss: 0.5789 - val_accuracy: 0.6875
Epoch 6/300
191/191 [==============================] - 0s 68us/step - loss: 0.4229 - accuracy: 0.7539 - val_loss: 0.5846 - val_accuracy: 0.7188
Epoch 7/300
191/191 [==============================] - 0s 68us/step - loss: 0.3853 - accuracy: 0.8220 - val_loss: 0.5885 - val_accuracy: 0.7500
Epoch 8/300
191/191 [==============================] - 0s 68us/step - loss: 0.3538 - accuracy: 0.8639 - val_loss: 0.6374 - val_accuracy: 0.7656
Epoch 9/300
191/191 [==============================] - 0s 73us/step - loss: 0.3117 - accuracy: 0.8901 - val_loss: 0.5932 - val_accuracy: 0.7812
Epoch 10/300
191/191 [==============================] - 0s 78us/step - loss: 0.2713 - accuracy: 0.9005 - val_loss: 0.6179 - val_accuracy: 0.7812
Epoch 11/300
191/191 [==============================] - 0s 78us/step - loss: 0.2289 - accuracy: 0.9267 - val_loss: 0.6047 - val_accuracy: 0.7969
Epoch 12/300
191/191 [==============================] - 0s 73us/step - loss: 0.1980 - accuracy: 0.9372 - val_loss: 0.5598 - val_accuracy: 0.7812
Epoch 13/300
191/191 [==============================] - 0s 68us/step - loss: 0.1694 - accuracy: 0.9476 - val_loss: 0.6474 - val_accuracy: 0.7969
Epoch 14/300
191/191 [==============================] - 0s 78us/step - loss: 0.1326 - accuracy: 0.9686 - val_loss: 0.5661 - val_accuracy: 0.8125
Epoch 15/300
191/191 [==============================] - 0s 73us/step - loss: 0.1077 - accuracy: 0.9686 - val_loss: 0.8141 - val_accuracy: 0.7969
Epoch 16/300
191/191 [==============================] - 0s 73us/step - loss: 0.0903 - accuracy: 0.9791 - val_loss: 0.6904 - val_accuracy: 0.7969
Epoch 17/300
191/191 [==============================] - 0s 68us/step - loss: 0.0707 - accuracy: 0.9895 - val_loss: 0.8441 - val_accuracy: 0.8125
Epoch 18/300
191/191 [==============================] - 0s 68us/step - loss: 0.0686 - accuracy: 0.9738 - val_loss: 1.0697 - val_accuracy: 0.8281
Epoch 19/300
191/191 [==============================] - 0s 68us/step - loss: 0.0483 - accuracy: 0.9895 - val_loss: 0.9490 - val_accuracy: 0.7969
Epoch 20/300
191/191 [==============================] - 0s 63us/step - loss: 0.0443 - accuracy: 0.9895 - val_loss: 1.0081 - val_accuracy: 0.8281
Epoch 21/300
191/191 [==============================] - 0s 68us/step - loss: 0.0370 - accuracy: 0.9895 - val_loss: 1.0532 - val_accuracy: 0.8281
Epoch 22/300
191/191 [==============================] - 0s 73us/step - loss: 0.0299 - accuracy: 0.9895 - val_loss: 1.1333 - val_accuracy: 0.8125
Epoch 23/300
191/191 [==============================] - 0s 73us/step - loss: 0.0266 - accuracy: 0.9895 - val_loss: 1.2640 - val_accuracy: 0.7969
Epoch 24/300
191/191 [==============================] - 0s 68us/step - loss: 0.0206 - accuracy: 0.9895 - val_loss: 1.3415 - val_accuracy: 0.7812
Epoch 25/300
191/191 [==============================] - 0s 78us/step - loss: 0.0163 - accuracy: 0.9948 - val_loss: 1.3880 - val_accuracy: 0.7812
Epoch 26/300
191/191 [==============================] - 0s 78us/step - loss: 0.0127 - accuracy: 1.0000 - val_loss: 1.4083 - val_accuracy: 0.7812
Epoch 27/300
191/191 [==============================] - 0s 78us/step - loss: 0.0109 - accuracy: 1.0000 - val_loss: 1.4624 - val_accuracy: 0.7812
Epoch 28/300
191/191 [==============================] - 0s 78us/step - loss: 0.0083 - accuracy: 1.0000 - val_loss: 1.5283 - val_accuracy: 0.7812

Epoch 00028: ReduceLROnPlateau reducing learning rate to 0.0044999998062849045.
Epoch 29/300
191/191 [==============================] - 0s 73us/step - loss: 0.0071 - accuracy: 1.0000 - val_loss: 1.5350 - val_accuracy: 0.7812
Epoch 30/300
191/191 [==============================] - 0s 68us/step - loss: 0.0063 - accuracy: 1.0000 - val_loss: 1.5338 - val_accuracy: 0.7812
Epoch 31/300
191/191 [==============================] - 0s 68us/step - loss: 0.0059 - accuracy: 1.0000 - val_loss: 1.5534 - val_accuracy: 0.7812
Epoch 32/300
191/191 [==============================] - 0s 73us/step - loss: 0.0053 - accuracy: 1.0000 - val_loss: 1.5753 - val_accuracy: 0.7812
Epoch 33/300
191/191 [==============================] - 0s 73us/step - loss: 0.0049 - accuracy: 1.0000 - val_loss: 1.5968 - val_accuracy: 0.7812
Epoch 34/300
191/191 [==============================] - 0s 68us/step - loss: 0.0046 - accuracy: 1.0000 - val_loss: 1.6156 - val_accuracy: 0.7812
Epoch 35/300
191/191 [==============================] - 0s 52us/step - loss: 0.0042 - accuracy: 1.0000 - val_loss: 1.6331 - val_accuracy: 0.7812
Epoch 36/300
191/191 [==============================] - 0s 63us/step - loss: 0.0040 - accuracy: 1.0000 - val_loss: 1.6560 - val_accuracy: 0.7812
Epoch 37/300
191/191 [==============================] - 0s 68us/step - loss: 0.0037 - accuracy: 1.0000 - val_loss: 1.6676 - val_accuracy: 0.7812
Epoch 38/300
191/191 [==============================] - 0s 73us/step - loss: 0.0035 - accuracy: 1.0000 - val_loss: 1.6919 - val_accuracy: 0.7812

Epoch 00038: ReduceLROnPlateau reducing learning rate to 0.0022499999031424522.
Epoch 39/300
191/191 [==============================] - 0s 68us/step - loss: 0.0033 - accuracy: 1.0000 - val_loss: 1.6996 - val_accuracy: 0.7812
Epoch 40/300
191/191 [==============================] - 0s 73us/step - loss: 0.0032 - accuracy: 1.0000 - val_loss: 1.7069 - val_accuracy: 0.7812
Epoch 41/300
191/191 [==============================] - ETA: 0s - loss: 0.0017 - accuracy: 1.00 - 0s 89us/step - loss: 0.0031 - accuracy: 1.0000 - val_loss: 1.7129 - val_accuracy: 0.7812
Epoch 42/300
191/191 [==============================] - 0s 105us/step - loss: 0.0030 - accuracy: 1.0000 - val_loss: 1.7220 - val_accuracy: 0.7812
Epoch 43/300
191/191 [==============================] - 0s 78us/step - loss: 0.0029 - accuracy: 1.0000 - val_loss: 1.7311 - val_accuracy: 0.7812
Epoch 44/300
191/191 [==============================] - 0s 73us/step - loss: 0.0029 - accuracy: 1.0000 - val_loss: 1.7396 - val_accuracy: 0.7812
Epoch 45/300
191/191 [==============================] - 0s 78us/step - loss: 0.0028 - accuracy: 1.0000 - val_loss: 1.7453 - val_accuracy: 0.7812
Epoch 46/300
191/191 [==============================] - 0s 68us/step - loss: 0.0027 - accuracy: 1.0000 - val_loss: 1.7520 - val_accuracy: 0.7812
Epoch 47/300
191/191 [==============================] - 0s 78us/step - loss: 0.0026 - accuracy: 1.0000 - val_loss: 1.7591 - val_accuracy: 0.7812
Epoch 48/300
191/191 [==============================] - 0s 73us/step - loss: 0.0026 - accuracy: 1.0000 - val_loss: 1.7702 - val_accuracy: 0.7812

Epoch 00048: ReduceLROnPlateau reducing learning rate to 0.0011249999515712261.
Epoch 49/300
191/191 [==============================] - 0s 78us/step - loss: 0.0025 - accuracy: 1.0000 - val_loss: 1.7740 - val_accuracy: 0.7812
Epoch 50/300
191/191 [==============================] - 0s 73us/step - loss: 0.0025 - accuracy: 1.0000 - val_loss: 1.7763 - val_accuracy: 0.7812
Epoch 51/300
191/191 [==============================] - 0s 73us/step - loss: 0.0024 - accuracy: 1.0000 - val_loss: 1.7780 - val_accuracy: 0.7812
Epoch 52/300
191/191 [==============================] - 0s 63us/step - loss: 0.0024 - accuracy: 1.0000 - val_loss: 1.7826 - val_accuracy: 0.7812
Epoch 53/300
191/191 [==============================] - 0s 63us/step - loss: 0.0024 - accuracy: 1.0000 - val_loss: 1.7868 - val_accuracy: 0.7812
Epoch 54/300
191/191 [==============================] - 0s 58us/step - loss: 0.0023 - accuracy: 1.0000 - val_loss: 1.7916 - val_accuracy: 0.7812
Epoch 55/300
191/191 [==============================] - 0s 84us/step - loss: 0.0023 - accuracy: 1.0000 - val_loss: 1.7959 - val_accuracy: 0.7812
Epoch 56/300
191/191 [==============================] - 0s 68us/step - loss: 0.0023 - accuracy: 1.0000 - val_loss: 1.7984 - val_accuracy: 0.7812
Epoch 57/300
191/191 [==============================] - 0s 78us/step - loss: 0.0022 - accuracy: 1.0000 - val_loss: 1.8032 - val_accuracy: 0.7812
Epoch 58/300
191/191 [==============================] - 0s 68us/step - loss: 0.0022 - accuracy: 1.0000 - val_loss: 1.8059 - val_accuracy: 0.7812

Epoch 00058: ReduceLROnPlateau reducing learning rate to 0.0005624999757856131.
Epoch 59/300
191/191 [==============================] - 0s 63us/step - loss: 0.0022 - accuracy: 1.0000 - val_loss: 1.8081 - val_accuracy: 0.7812
Epoch 60/300
191/191 [==============================] - 0s 63us/step - loss: 0.0022 - accuracy: 1.0000 - val_loss: 1.8100 - val_accuracy: 0.7812
Epoch 61/300
191/191 [==============================] - 0s 68us/step - loss: 0.0022 - accuracy: 1.0000 - val_loss: 1.8121 - val_accuracy: 0.7812
Epoch 62/300
191/191 [==============================] - 0s 73us/step - loss: 0.0021 - accuracy: 1.0000 - val_loss: 1.8143 - val_accuracy: 0.7812
Epoch 63/300
191/191 [==============================] - 0s 89us/step - loss: 0.0021 - accuracy: 1.0000 - val_loss: 1.8166 - val_accuracy: 0.7812
Epoch 64/300
191/191 [==============================] - 0s 89us/step - loss: 0.0021 - accuracy: 1.0000 - val_loss: 1.8171 - val_accuracy: 0.7812
Epoch 65/300
191/191 [==============================] - 0s 89us/step - loss: 0.0021 - accuracy: 1.0000 - val_loss: 1.8199 - val_accuracy: 0.7812
Epoch 66/300
191/191 [==============================] - 0s 84us/step - loss: 0.0021 - accuracy: 1.0000 - val_loss: 1.8225 - val_accuracy: 0.7812
Epoch 67/300
191/191 [==============================] - 0s 89us/step - loss: 0.0021 - accuracy: 1.0000 - val_loss: 1.8239 - val_accuracy: 0.7812
Epoch 68/300
191/191 [==============================] - 0s 89us/step - loss: 0.0020 - accuracy: 1.0000 - val_loss: 1.8262 - val_accuracy: 0.7812

Epoch 00068: ReduceLROnPlateau reducing learning rate to 0.00028124998789280653.
Epoch 69/300
191/191 [==============================] - ETA: 0s - loss: 0.0024 - accuracy: 1.00 - 0s 99us/step - loss: 0.0020 - accuracy: 1.0000 - val_loss: 1.8272 - val_accuracy: 0.7812
Epoch 70/300
191/191 [==============================] - 0s 105us/step - loss: 0.0020 - accuracy: 1.0000 - val_loss: 1.8279 - val_accuracy: 0.7812
Epoch 71/300
191/191 [==============================] - 0s 99us/step - loss: 0.0020 - accuracy: 1.0000 - val_loss: 1.8292 - val_accuracy: 0.7812
Epoch 72/300
191/191 [==============================] - 0s 94us/step - loss: 0.0020 - accuracy: 1.0000 - val_loss: 1.8300 - val_accuracy: 0.7812
Epoch 73/300
191/191 [==============================] - 0s 94us/step - loss: 0.0020 - accuracy: 1.0000 - val_loss: 1.8310 - val_accuracy: 0.7812
Epoch 74/300
191/191 [==============================] - 0s 99us/step - loss: 0.0020 - accuracy: 1.0000 - val_loss: 1.8319 - val_accuracy: 0.7812
Epoch 75/300
191/191 [==============================] - 0s 89us/step - loss: 0.0020 - accuracy: 1.0000 - val_loss: 1.8329 - val_accuracy: 0.7812
Epoch 76/300
191/191 [==============================] - 0s 89us/step - loss: 0.0020 - accuracy: 1.0000 - val_loss: 1.8342 - val_accuracy: 0.7812
Epoch 77/300
191/191 [==============================] - 0s 94us/step - loss: 0.0020 - accuracy: 1.0000 - val_loss: 1.8347 - val_accuracy: 0.7812
Epoch 78/300
191/191 [==============================] - 0s 99us/step - loss: 0.0020 - accuracy: 1.0000 - val_loss: 1.8359 - val_accuracy: 0.7812

Epoch 00078: ReduceLROnPlateau reducing learning rate to 0.00014062499394640326.
Epoch 79/300
191/191 [==============================] - 0s 94us/step - loss: 0.0020 - accuracy: 1.0000 - val_loss: 1.8367 - val_accuracy: 0.7812
Epoch 80/300
191/191 [==============================] - 0s 120us/step - loss: 0.0020 - accuracy: 1.0000 - val_loss: 1.8372 - val_accuracy: 0.7812
Epoch 81/300
191/191 [==============================] - 0s 105us/step - loss: 0.0020 - accuracy: 1.0000 - val_loss: 1.8375 - val_accuracy: 0.7812
Epoch 82/300
191/191 [==============================] - 0s 105us/step - loss: 0.0020 - accuracy: 1.0000 - val_loss: 1.8380 - val_accuracy: 0.7812
Epoch 83/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8386 - val_accuracy: 0.7812
Epoch 84/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8390 - val_accuracy: 0.7812
Epoch 85/300
191/191 [==============================] - 0s 188us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8396 - val_accuracy: 0.7812
Epoch 86/300
191/191 [==============================] - 0s 162us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8402 - val_accuracy: 0.7812
Epoch 87/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8408 - val_accuracy: 0.7812
Epoch 88/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8414 - val_accuracy: 0.7812

Epoch 00088: ReduceLROnPlateau reducing learning rate to 7.031249697320163e-05.
Epoch 89/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8415 - val_accuracy: 0.7812
Epoch 90/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8419 - val_accuracy: 0.7812
Epoch 91/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8421 - val_accuracy: 0.7812
Epoch 92/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8425 - val_accuracy: 0.7812
Epoch 93/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8428 - val_accuracy: 0.7812
Epoch 94/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8431 - val_accuracy: 0.7812
Epoch 95/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8433 - val_accuracy: 0.7812
Epoch 96/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8436 - val_accuracy: 0.7812
Epoch 97/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8440 - val_accuracy: 0.7812
Epoch 98/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8443 - val_accuracy: 0.7812

Epoch 00098: ReduceLROnPlateau reducing learning rate to 3.5156248486600816e-05.
Epoch 99/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8444 - val_accuracy: 0.7812
Epoch 100/300
191/191 [==============================] - 0s 110us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8445 - val_accuracy: 0.7812
Epoch 101/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8447 - val_accuracy: 0.7812
Epoch 102/300
191/191 [==============================] - 0s 84us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8449 - val_accuracy: 0.7812
Epoch 103/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8450 - val_accuracy: 0.7812
Epoch 104/300
191/191 [==============================] - 0s 110us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8451 - val_accuracy: 0.7812
Epoch 105/300
191/191 [==============================] - 0s 115us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8453 - val_accuracy: 0.7812
Epoch 106/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8455 - val_accuracy: 0.7812
Epoch 107/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8457 - val_accuracy: 0.7812
Epoch 108/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8458 - val_accuracy: 0.7812

Epoch 00108: ReduceLROnPlateau reducing learning rate to 1.7578124243300408e-05.
Epoch 109/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8459 - val_accuracy: 0.7812
Epoch 110/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8460 - val_accuracy: 0.7812
Epoch 111/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8461 - val_accuracy: 0.7812
Epoch 112/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8461 - val_accuracy: 0.7812
Epoch 113/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8462 - val_accuracy: 0.7812
Epoch 114/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8463 - val_accuracy: 0.7812
Epoch 115/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8464 - val_accuracy: 0.7812
Epoch 116/300
191/191 [==============================] - 0s 110us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8465 - val_accuracy: 0.7812
Epoch 117/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8466 - val_accuracy: 0.7812
Epoch 118/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8466 - val_accuracy: 0.7812

Epoch 00118: ReduceLROnPlateau reducing learning rate to 8.789062121650204e-06.
Epoch 119/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8467 - val_accuracy: 0.7812
Epoch 120/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8467 - val_accuracy: 0.7812
Epoch 121/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8468 - val_accuracy: 0.7812
Epoch 122/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8468 - val_accuracy: 0.7812
Epoch 123/300
191/191 [==============================] - 0s 131us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8469 - val_accuracy: 0.7812
Epoch 124/300
191/191 [==============================] - 0s 120us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8469 - val_accuracy: 0.7812
Epoch 125/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8470 - val_accuracy: 0.7812
Epoch 126/300
191/191 [==============================] - 0s 84us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8470 - val_accuracy: 0.7812
Epoch 127/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8470 - val_accuracy: 0.7812
Epoch 128/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8471 - val_accuracy: 0.7812

Epoch 00128: ReduceLROnPlateau reducing learning rate to 4.394531060825102e-06.
Epoch 129/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8471 - val_accuracy: 0.7812
Epoch 130/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8471 - val_accuracy: 0.7812
Epoch 131/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8471 - val_accuracy: 0.7812
Epoch 132/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8472 - val_accuracy: 0.7812
Epoch 133/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8472 - val_accuracy: 0.7812
Epoch 134/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8472 - val_accuracy: 0.7812
Epoch 135/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8472 - val_accuracy: 0.7812
Epoch 136/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8473 - val_accuracy: 0.7812
Epoch 137/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8473 - val_accuracy: 0.7812
Epoch 138/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8473 - val_accuracy: 0.7812

Epoch 00138: ReduceLROnPlateau reducing learning rate to 2.197265530412551e-06.
Epoch 139/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8473 - val_accuracy: 0.7812
Epoch 140/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8473 - val_accuracy: 0.7812
Epoch 141/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8473 - val_accuracy: 0.7812
Epoch 142/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8473 - val_accuracy: 0.7812
Epoch 143/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8474 - val_accuracy: 0.7812
Epoch 144/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8474 - val_accuracy: 0.7812
Epoch 145/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8474 - val_accuracy: 0.7812
Epoch 146/300
191/191 [==============================] - 0s 105us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8474 - val_accuracy: 0.7812
Epoch 147/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8474 - val_accuracy: 0.7812
Epoch 148/300
191/191 [==============================] - 0s 105us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8474 - val_accuracy: 0.7812

Epoch 00148: ReduceLROnPlateau reducing learning rate to 1.0986327652062755e-06.
Epoch 149/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8474 - val_accuracy: 0.7812
Epoch 150/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8474 - val_accuracy: 0.7812
Epoch 151/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8474 - val_accuracy: 0.7812
Epoch 152/300
191/191 [==============================] - 0s 120us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8474 - val_accuracy: 0.7812
Epoch 153/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 154/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 155/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 156/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 157/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 158/300
191/191 [==============================] - 0s 84us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812

Epoch 00158: ReduceLROnPlateau reducing learning rate to 5.493163826031378e-07.
Epoch 159/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 160/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 161/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 162/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 163/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 164/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 165/300
191/191 [==============================] - ETA: 0s - loss: 0.0014 - accuracy: 1.00 - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 166/300
191/191 [==============================] - 0s 110us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 167/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 168/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812

Epoch 00168: ReduceLROnPlateau reducing learning rate to 2.746581913015689e-07.
Epoch 169/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 170/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 171/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 172/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 173/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 174/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 175/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 176/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 177/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 178/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812

Epoch 00178: ReduceLROnPlateau reducing learning rate to 1.3732909565078444e-07.
Epoch 179/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 180/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 181/300
191/191 [==============================] - 0s 110us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 182/300
191/191 [==============================] - 0s 209us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 183/300
191/191 [==============================] - 0s 110us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 184/300
191/191 [==============================] - 0s 110us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 185/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 186/300
191/191 [==============================] - 0s 126us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 187/300
191/191 [==============================] - 0s 105us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 188/300
191/191 [==============================] - 0s 110us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812

Epoch 00188: ReduceLROnPlateau reducing learning rate to 6.866454782539222e-08.
Epoch 189/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 190/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 191/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 192/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 193/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 194/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 195/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 196/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 197/300
191/191 [==============================] - 0s 110us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 198/300
191/191 [==============================] - 0s 115us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812

Epoch 00198: ReduceLROnPlateau reducing learning rate to 3.433227391269611e-08.
Epoch 199/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 200/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 201/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 202/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 203/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 204/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 205/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 206/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 207/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 208/300
191/191 [==============================] - 0s 105us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812

Epoch 00208: ReduceLROnPlateau reducing learning rate to 1.7166136956348055e-08.
Epoch 209/300
191/191 [==============================] - 0s 115us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 210/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 211/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 212/300
191/191 [==============================] - 0s 84us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 213/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 214/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 215/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 216/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 217/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 218/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812

Epoch 00218: ReduceLROnPlateau reducing learning rate to 8.583068478174027e-09.
Epoch 219/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 220/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 221/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 222/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 223/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 224/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 225/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 226/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 227/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 228/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812

Epoch 00228: ReduceLROnPlateau reducing learning rate to 4.291534239087014e-09.
Epoch 229/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 230/300
191/191 [==============================] - 0s 105us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 231/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 232/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 233/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 234/300
191/191 [==============================] - ETA: 0s - loss: 4.6449e-04 - accuracy: 1.00 - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 235/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 236/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 237/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 238/300
191/191 [==============================] - 0s 115us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812

Epoch 00238: ReduceLROnPlateau reducing learning rate to 2.145767119543507e-09.
Epoch 239/300
191/191 [==============================] - 0s 105us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 240/300
191/191 [==============================] - 0s 188us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 241/300
191/191 [==============================] - 0s 209us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 242/300
191/191 [==============================] - 0s 105us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 243/300
191/191 [==============================] - 0s 105us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 244/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 245/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 246/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 247/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 248/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812

Epoch 00248: ReduceLROnPlateau reducing learning rate to 1.0728835597717534e-09.
Epoch 249/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 250/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 251/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 252/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 253/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 254/300
191/191 [==============================] - 0s 110us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 255/300
191/191 [==============================] - 0s 120us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 256/300
191/191 [==============================] - 0s 105us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 257/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 258/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812

Epoch 00258: ReduceLROnPlateau reducing learning rate to 5.364417798858767e-10.
Epoch 259/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 260/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 261/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 262/300
191/191 [==============================] - 0s 115us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 263/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 264/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 265/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 266/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 267/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 268/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812

Epoch 00268: ReduceLROnPlateau reducing learning rate to 2.6822088994293836e-10.
Epoch 269/300
191/191 [==============================] - 0s 115us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 270/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 271/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 272/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 273/300
191/191 [==============================] - 0s 105us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 274/300
191/191 [==============================] - 0s 89us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 275/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 276/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 277/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 278/300
191/191 [==============================] - 0s 105us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812

Epoch 00278: ReduceLROnPlateau reducing learning rate to 1.3411044497146918e-10.
Epoch 279/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 280/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 281/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 282/300
191/191 [==============================] - 0s 105us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 283/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 284/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 285/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 286/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 287/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 288/300
191/191 [==============================] - 0s 110us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812

Epoch 00288: ReduceLROnPlateau reducing learning rate to 6.705522248573459e-11.
Epoch 289/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 290/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 291/300
191/191 [==============================] - 0s 105us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 292/300
191/191 [==============================] - 0s 110us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 293/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 294/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 295/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 296/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 297/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 298/300
191/191 [==============================] - 0s 94us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812

Epoch 00298: ReduceLROnPlateau reducing learning rate to 3.3527611242867295e-11.
Epoch 299/300
191/191 [==============================] - 0s 105us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
Epoch 300/300
191/191 [==============================] - 0s 99us/step - loss: 0.0019 - accuracy: 1.0000 - val_loss: 1.8475 - val_accuracy: 0.7812
In [55]:
# Plot the training history: accuracy and loss curves, training vs. validation.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
range(0, 300)
In [56]:
# Evaluate the trained network on the held-out test split.
evaluation = model.evaluate(X_test, y_test)
test_loss, test_acc = evaluation
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
64/64 [==============================] - 0s 78us/step
test loss: 1.8475333452224731, test accuracy: 0.78125
In [57]:
# Score the raw probability predictions with ROC AUC (threshold-free metric).
y_pred = model.predict(X_test)
auc = roc_auc_score(y_test, y_pred)
print("AUC ROC: ", auc)
AUC ROC:  0.8500000000000001
In [59]:
# Binarize the probabilities at 0.5, then report Cohen's kappa and the confusion matrix.
y_pred = [int(p >= 0.5) for p in y_pred]
print("Kappa: ", cohen_kappa_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
Kappa:  0.4285714285714286
[[41  3]
 [11  9]]

KMeans

In [95]:
X
Out[95]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13
0 -0.339415 0.847773 0.497198 -0.389310 1.225458 1.947033 -0.736267 0.492219 0.576682 1.504697 -1.796460 0.724954 0.958600
1 0.587658 -1.195426 0.636375 0.199876 0.765321 0.061181 0.379367 -0.440867 0.232893 1.339920 0.110001 0.807525 0.815678
2 1.465595 -2.307943 0.354567 -0.058273 -1.298853 -0.811453 -1.551580 -3.934320 -1.079432 2.546130 1.421407 0.639359 0.199094
3 0.749403 -1.690498 -0.125200 -1.016135 0.825845 0.271444 -0.104786 -0.992141 0.049182 1.425948 -0.343269 -0.789558 -0.411898
4 -0.280577 0.393332 0.744917 2.411400 -0.777421 -0.420018 1.258355 -1.544565 -0.498071 0.421527 -0.632908 -0.056846 -0.072348
5 -0.158690 0.404891 -0.147920 -0.299241 -0.786974 0.697216 0.290501 0.019739 -1.468086 -0.346174 -0.086965 0.026492 1.019512
6 1.646777 0.772744 -1.425228 -0.562610 -1.556076 0.533289 -0.404271 1.676958 0.979516 0.415548 0.544719 0.433332 0.204271
7 1.124970 0.506236 0.738993 1.984485 -0.928706 -0.494097 -0.707105 -0.494778 -1.642929 0.207467 0.181382 2.431721 0.848697
8 0.920059 1.438862 -2.048354 1.503567 -2.801303 0.567132 -0.745441 0.569519 0.130917 1.965436 -0.034797 1.164878 0.074074
9 0.182544 0.310622 0.067722 0.870138 0.168366 0.682045 -0.191296 -0.144962 -0.630020 -0.284032 -0.315301 0.344841 0.495167
10 0.168663 0.389450 0.034360 1.213392 0.248437 0.870618 -0.460824 -0.174734 -0.710502 -0.228408 -0.265153 0.349416 0.584114
11 0.153010 -0.118336 0.639531 1.504522 0.937909 0.356048 -0.089987 -0.628522 0.064203 0.966049 0.403915 -0.943626 0.173874
12 0.132578 0.261966 -2.871493 -3.398160 -0.256458 1.596532 -0.358711 0.175955 -0.499075 0.949085 2.235525 -0.197712 -0.272366
13 1.094629 0.885150 -1.130672 -0.083270 0.672482 0.750453 -0.863949 0.140540 0.423312 -0.305155 -0.424905 0.318660 0.885900
14 0.771472 0.364448 -0.454696 0.434253 0.912699 0.745924 -0.073390 -0.406473 0.450765 0.323180 -0.458826 -0.132295 0.495454
15 0.677561 0.166795 0.746471 0.075191 0.867924 -1.621678 0.771146 -0.067286 0.557998 -0.093593 0.020233 -0.800013 -0.629188
16 -0.032353 1.227345 -0.188580 0.927210 0.016663 1.001867 -0.473811 0.782387 1.542760 -0.345478 -0.838104 -0.439443 1.179204
17 0.459031 1.258961 -0.329412 1.391790 -0.208888 1.059241 -1.245671 0.619153 0.245780 0.644548 -0.602629 -0.928581 0.739885
18 -0.359172 0.051214 -0.603962 0.778896 1.630471 1.802477 1.486205 -0.140738 -0.894366 0.736624 2.114721 1.078175 -0.965785
19 0.209859 -0.615399 -0.676895 0.735655 0.805509 -0.696793 1.073068 0.240429 -0.205934 -0.759693 0.672843 0.569482 -0.455391
20 0.127381 -0.265099 -0.258801 -0.127568 0.649447 0.244473 1.897421 -0.344616 -0.593159 0.065147 1.787607 1.219355 -0.171813
21 1.222717 0.409860 1.311826 0.703873 0.322062 0.305461 -0.522644 -0.750833 0.001767 0.017953 0.254329 -0.227762 -0.614790
22 1.173352 0.490500 0.742825 -0.028159 -0.272396 -0.502733 -0.759443 -1.031924 -0.157975 0.075659 0.604220 0.143298 -0.001849
23 1.069960 0.858822 -0.795544 0.076688 0.851875 0.735014 -0.758779 0.065595 0.532667 -0.391858 -0.497019 0.240822 0.848126
24 0.581377 -0.804045 0.399887 1.535671 0.245878 0.904192 -0.233991 -0.925983 0.212280 0.499535 -0.024926 -0.925999 1.294925
25 0.161110 0.025075 0.716318 1.532230 0.889883 0.353167 -0.058787 -0.593046 0.093773 0.927085 0.199691 -0.979872 0.232850
26 0.431443 0.442713 0.259120 0.045533 0.102675 0.367606 0.054320 0.942924 0.180609 0.550983 0.265291 0.321252 -0.830969
27 0.344525 -1.140315 -0.725453 -0.547965 0.449924 0.303904 1.053624 1.051712 0.509322 0.181611 -0.519979 -1.134490 -1.439105
28 -0.041565 0.671274 0.195143 0.247294 0.531620 1.050124 0.311358 0.988161 -0.198869 0.387795 1.757366 1.351684 0.194840
29 0.417845 -1.134173 -0.760709 -0.605264 0.077464 0.533333 1.104524 2.124971 0.083548 0.801730 0.092534 -1.281628 -1.468782
... ... ... ... ... ... ... ... ... ... ... ... ... ...
225 1.532114 -1.060006 -0.434145 -0.999435 -1.259462 0.039140 -0.802013 -0.655286 0.714448 1.005958 -0.086372 0.537392 0.054440
226 -0.942320 1.172080 0.506725 -0.230675 -0.104635 0.898742 -1.107001 -1.182148 -0.940991 0.232366 1.778224 0.975251 1.731084
227 1.421974 0.631029 -0.563813 -0.694595 -0.673270 0.929022 0.476907 -1.025173 -0.813644 -0.060006 -0.738730 -0.558099 0.057654
228 -1.473385 -0.806223 1.849423 -1.252541 0.941013 -0.872947 -1.812392 -0.242718 -0.097212 -0.510500 -0.232195 -0.546399 0.945530
229 -1.135926 -0.772372 1.164844 -1.022517 0.630202 -0.496999 -1.101656 -0.168921 -0.295159 -0.587401 0.369033 -0.266325 0.604469
230 -1.085049 0.879566 0.442593 0.128917 0.393498 0.531555 0.392194 1.418515 0.891015 -0.348926 -0.756201 -0.838584 -0.015971
231 -0.352258 0.556982 0.530520 0.443818 0.300921 0.032128 -0.797384 -0.573532 0.398084 0.328875 -0.274964 -1.300920 0.254456
232 -1.190363 0.797356 0.758472 0.587917 0.890540 0.471925 0.105793 0.680721 0.230834 -0.150709 -0.816744 -0.470618 0.371198
233 -0.651003 -0.586618 1.326854 -0.451354 0.507113 0.165474 -0.919675 -0.448249 -1.310940 -1.372737 0.406029 -1.414627 -0.434858
234 -1.459511 -0.516281 1.631699 -1.141842 0.584621 -0.458541 -1.428877 -0.934556 -0.216455 -0.049794 0.095580 0.387068 0.693730
235 -0.726984 0.702447 0.798069 -0.320660 0.530902 1.019988 0.144995 0.207847 0.039592 0.220761 0.762941 0.575034 0.671517
236 -0.300986 -0.404923 0.715406 0.245380 -0.427936 -0.334843 -0.228084 -0.330898 -0.674327 0.199560 0.827455 0.016433 0.866789
237 -0.736244 0.088611 0.910051 0.437100 0.258256 0.363828 -0.415290 -0.717445 -0.012727 0.436925 -0.786954 -1.217376 0.352825
238 0.610473 -2.664315 1.303652 -2.022376 1.500032 -1.280926 -1.249533 0.432111 -0.768558 0.291156 -0.092312 0.053770 -0.401166
239 -2.045424 -2.954642 0.302601 -0.868092 -1.038134 -1.230777 0.514329 0.057591 -1.023895 0.275395 -1.450282 0.386242 0.318763
240 0.329793 -1.367570 -1.454329 -0.207924 -0.723609 -0.149025 -0.085298 -0.011595 -0.240239 -0.009120 -0.325229 -0.025722 0.114182
241 -1.919591 1.382172 -0.134161 0.837967 -0.687780 0.944303 -0.258652 -0.742178 0.386031 -1.178099 -1.843543 -0.710556 -0.318561
242 -2.087669 1.400006 -0.494964 0.451717 -0.759188 0.736625 0.133121 -0.196031 1.121231 0.474128 -0.345937 -0.409324 -0.442069
243 -2.131652 0.439305 -0.612226 0.854126 -0.494550 0.825299 0.301373 -0.018964 0.690556 -0.078762 -0.709495 -0.075857 -0.418656
244 -1.611989 -0.756403 -0.410917 1.075909 0.297336 -1.317576 1.115011 -0.467065 -0.768378 1.615499 1.611125 -1.018782 -1.798744
245 -0.142010 0.000190 -0.063461 -0.506353 -0.386942 -0.256144 0.270621 -1.497417 0.507892 0.456828 -0.431169 -0.978417 0.015849
246 -1.263975 -1.168117 -1.396090 -0.312016 1.862268 1.400290 0.646060 -0.686864 0.418524 -0.069926 -0.653856 -0.853617 -0.106814
247 -0.507700 0.899825 1.510153 1.083642 2.081451 0.589016 0.901321 0.658808 0.152596 0.176442 -0.447633 0.287838 0.650479
248 -0.159768 0.518093 2.197018 0.698491 0.476336 -2.014255 -1.614667 -0.397282 -1.781932 -0.208894 1.650551 -0.771436 -0.987237
249 -1.037899 1.016712 2.774230 0.665468 -0.385673 0.587263 -0.121609 -0.331379 0.622484 -0.387131 -0.276584 0.218207 1.689216
250 -0.526923 -1.169944 0.474875 -0.789231 0.369827 -0.537003 -1.089843 -0.173366 -0.023237 -0.142334 0.740065 0.813114 0.872556
251 -0.770856 -1.024349 -0.019140 -0.097521 0.092703 0.369242 -0.273901 0.190740 -0.074032 0.113055 0.140291 -0.696275 0.166679
252 -0.905458 -0.790575 0.206164 -0.723816 -0.444860 0.107833 -0.734514 -0.533865 -0.634334 0.320526 0.088428 -0.348210 0.347201
253 -1.378235 -0.338405 0.016815 -0.394563 0.034043 1.023865 -0.303960 -1.316121 0.198697 0.670577 0.809574 0.580565 0.056004
254 -0.199959 -2.035812 -0.904507 -1.511975 -0.437843 0.262972 -1.943788 -1.963300 -2.256227 0.354369 -0.039829 0.882325 0.139307

255 rows × 13 columns

In [96]:
# Elbow method: within-cluster sum of squares (inertia) for k = 1..14.
# KMeans.fit returns the fitted estimator, so the whole sweep is one comprehension.
WSSs = [KMeans(n_clusters=k, random_state=0).fit(X).inertia_ for k in range(1, 15)]
WSSs
Out[96]:
[3315.0,
 2972.7888695817974,
 2748.18187155972,
 2544.9420084212106,
 2413.687059384553,
 2278.037996783226,
 2213.3487507256823,
 2123.4282707474663,
 2067.8299633414163,
 1977.777252698108,
 1956.5229777214513,
 1880.0296166971755,
 1815.5096049846275,
 1785.9955747862728]
In [97]:
# Visualize the elbow curve; the bend is read off to pick k (k=6 below).
fig, ax = plt.subplots(figsize=(12, 12))
ax.plot(range(1, 15), WSSs)
Out[97]:
[<matplotlib.lines.Line2D at 0x1b8263247f0>]

K=6

In [98]:
# Fit K-Means with k=6 (chosen from the elbow plot above); fixed seed for reproducibility.
kmeans_mfcc = KMeans(n_clusters=6, random_state=0, n_init=10)
kmeans_mfcc.fit(X)
Out[98]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=6, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [99]:
kmeans_mfcc.labels_
Out[99]:
array([4, 2, 2, 2, 0, 1, 1, 0, 1, 4, 4, 4, 1, 4, 4, 3, 4, 4, 4, 4, 4, 0,
       0, 4, 4, 4, 4, 3, 4, 3, 4, 4, 3, 1, 3, 1, 4, 4, 3, 1, 1, 1, 4, 4,
       2, 1, 1, 1, 4, 1, 1, 1, 4, 3, 3, 4, 4, 1, 4, 1, 0, 4, 4, 4, 3, 3,
       3, 4, 3, 0, 4, 2, 1, 4, 0, 4, 4, 3, 3, 0, 0, 2, 1, 4, 1, 0, 0, 3,
       4, 1, 4, 4, 4, 4, 1, 1, 0, 3, 3, 3, 4, 1, 1, 4, 1, 4, 0, 3, 1, 1,
       1, 0, 3, 2, 4, 4, 0, 1, 5, 3, 4, 0, 0, 2, 1, 0, 0, 0, 2, 2, 2, 2,
       4, 4, 0, 0, 2, 2, 2, 4, 4, 4, 2, 2, 2, 3, 0, 4, 1, 3, 3, 3, 3, 3,
       4, 3, 1, 0, 0, 0, 1, 1, 0, 2, 2, 0, 3, 0, 1, 0, 0, 0, 0, 1, 3, 2,
       4, 4, 1, 2, 4, 3, 2, 3, 4, 4, 4, 1, 2, 3, 3, 0, 2, 2, 2, 1, 0, 1,
       4, 2, 0, 4, 3, 3, 2, 3, 5, 5, 2, 3, 3, 2, 0, 3, 0, 2, 4, 2, 1, 2,
       1, 3, 4, 4, 4, 1, 0, 1, 2, 2, 3, 0, 4, 2, 2, 4, 0, 2, 2, 2, 1, 3,
       4, 4, 2, 2, 2, 4, 0, 0, 2, 2, 2, 2, 2])
In [100]:
# Cluster assignment per track; on the same data this matches kmeans_mfcc.labels_
# (compare Out[99] and Out[100]).
clusters_mfcc = kmeans_mfcc.predict(X)
clusters_mfcc
Out[100]:
array([4, 2, 2, 2, 0, 1, 1, 0, 1, 4, 4, 4, 1, 4, 4, 3, 4, 4, 4, 4, 4, 0,
       0, 4, 4, 4, 4, 3, 4, 3, 4, 4, 3, 1, 3, 1, 4, 4, 3, 1, 1, 1, 4, 4,
       2, 1, 1, 1, 4, 1, 1, 1, 4, 3, 3, 4, 4, 1, 4, 1, 0, 4, 4, 4, 3, 3,
       3, 4, 3, 0, 4, 2, 1, 4, 0, 4, 4, 3, 3, 0, 0, 2, 1, 4, 1, 0, 0, 3,
       4, 1, 4, 4, 4, 4, 1, 1, 0, 3, 3, 3, 4, 1, 1, 4, 1, 4, 0, 3, 1, 1,
       1, 0, 3, 2, 4, 4, 0, 1, 5, 3, 4, 0, 0, 2, 1, 0, 0, 0, 2, 2, 2, 2,
       4, 4, 0, 0, 2, 2, 2, 4, 4, 4, 2, 2, 2, 3, 0, 4, 1, 3, 3, 3, 3, 3,
       4, 3, 1, 0, 0, 0, 1, 1, 0, 2, 2, 0, 3, 0, 1, 0, 0, 0, 0, 1, 3, 2,
       4, 4, 1, 2, 4, 3, 2, 3, 4, 4, 4, 1, 2, 3, 3, 0, 2, 2, 2, 1, 0, 1,
       4, 2, 0, 4, 3, 3, 2, 3, 5, 5, 2, 3, 3, 2, 0, 3, 0, 2, 4, 2, 1, 2,
       1, 3, 4, 4, 4, 1, 0, 1, 2, 2, 3, 0, 4, 2, 2, 4, 0, 2, 2, 2, 1, 3,
       4, 4, 2, 2, 2, 4, 0, 0, 2, 2, 2, 2, 2])
In [101]:
# NOTE(review): these two assignments mutate the feature frame X in place,
# appending non-feature columns ('Cluster', 'chosen'); any later cell that
# reuses X as raw model input would be contaminated — confirm X is reassigned
# before the next modeling section.
X.loc[:,'Cluster'] = clusters_mfcc
X.loc[:,'chosen'] = list(y)
In [102]:
X
Out[102]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13 Cluster chosen
0 -0.339415 0.847773 0.497198 -0.389310 1.225458 1.947033 -0.736267 0.492219 0.576682 1.504697 -1.796460 0.724954 0.958600 4 0
1 0.587658 -1.195426 0.636375 0.199876 0.765321 0.061181 0.379367 -0.440867 0.232893 1.339920 0.110001 0.807525 0.815678 2 0
2 1.465595 -2.307943 0.354567 -0.058273 -1.298853 -0.811453 -1.551580 -3.934320 -1.079432 2.546130 1.421407 0.639359 0.199094 2 0
3 0.749403 -1.690498 -0.125200 -1.016135 0.825845 0.271444 -0.104786 -0.992141 0.049182 1.425948 -0.343269 -0.789558 -0.411898 2 0
4 -0.280577 0.393332 0.744917 2.411400 -0.777421 -0.420018 1.258355 -1.544565 -0.498071 0.421527 -0.632908 -0.056846 -0.072348 0 0
5 -0.158690 0.404891 -0.147920 -0.299241 -0.786974 0.697216 0.290501 0.019739 -1.468086 -0.346174 -0.086965 0.026492 1.019512 1 0
6 1.646777 0.772744 -1.425228 -0.562610 -1.556076 0.533289 -0.404271 1.676958 0.979516 0.415548 0.544719 0.433332 0.204271 1 0
7 1.124970 0.506236 0.738993 1.984485 -0.928706 -0.494097 -0.707105 -0.494778 -1.642929 0.207467 0.181382 2.431721 0.848697 0 0
8 0.920059 1.438862 -2.048354 1.503567 -2.801303 0.567132 -0.745441 0.569519 0.130917 1.965436 -0.034797 1.164878 0.074074 1 0
9 0.182544 0.310622 0.067722 0.870138 0.168366 0.682045 -0.191296 -0.144962 -0.630020 -0.284032 -0.315301 0.344841 0.495167 4 0
10 0.168663 0.389450 0.034360 1.213392 0.248437 0.870618 -0.460824 -0.174734 -0.710502 -0.228408 -0.265153 0.349416 0.584114 4 0
11 0.153010 -0.118336 0.639531 1.504522 0.937909 0.356048 -0.089987 -0.628522 0.064203 0.966049 0.403915 -0.943626 0.173874 4 0
12 0.132578 0.261966 -2.871493 -3.398160 -0.256458 1.596532 -0.358711 0.175955 -0.499075 0.949085 2.235525 -0.197712 -0.272366 1 0
13 1.094629 0.885150 -1.130672 -0.083270 0.672482 0.750453 -0.863949 0.140540 0.423312 -0.305155 -0.424905 0.318660 0.885900 4 0
14 0.771472 0.364448 -0.454696 0.434253 0.912699 0.745924 -0.073390 -0.406473 0.450765 0.323180 -0.458826 -0.132295 0.495454 4 0
15 0.677561 0.166795 0.746471 0.075191 0.867924 -1.621678 0.771146 -0.067286 0.557998 -0.093593 0.020233 -0.800013 -0.629188 3 0
16 -0.032353 1.227345 -0.188580 0.927210 0.016663 1.001867 -0.473811 0.782387 1.542760 -0.345478 -0.838104 -0.439443 1.179204 4 0
17 0.459031 1.258961 -0.329412 1.391790 -0.208888 1.059241 -1.245671 0.619153 0.245780 0.644548 -0.602629 -0.928581 0.739885 4 0
18 -0.359172 0.051214 -0.603962 0.778896 1.630471 1.802477 1.486205 -0.140738 -0.894366 0.736624 2.114721 1.078175 -0.965785 4 0
19 0.209859 -0.615399 -0.676895 0.735655 0.805509 -0.696793 1.073068 0.240429 -0.205934 -0.759693 0.672843 0.569482 -0.455391 4 0
20 0.127381 -0.265099 -0.258801 -0.127568 0.649447 0.244473 1.897421 -0.344616 -0.593159 0.065147 1.787607 1.219355 -0.171813 4 0
21 1.222717 0.409860 1.311826 0.703873 0.322062 0.305461 -0.522644 -0.750833 0.001767 0.017953 0.254329 -0.227762 -0.614790 0 0
22 1.173352 0.490500 0.742825 -0.028159 -0.272396 -0.502733 -0.759443 -1.031924 -0.157975 0.075659 0.604220 0.143298 -0.001849 0 0
23 1.069960 0.858822 -0.795544 0.076688 0.851875 0.735014 -0.758779 0.065595 0.532667 -0.391858 -0.497019 0.240822 0.848126 4 0
24 0.581377 -0.804045 0.399887 1.535671 0.245878 0.904192 -0.233991 -0.925983 0.212280 0.499535 -0.024926 -0.925999 1.294925 4 0
25 0.161110 0.025075 0.716318 1.532230 0.889883 0.353167 -0.058787 -0.593046 0.093773 0.927085 0.199691 -0.979872 0.232850 4 0
26 0.431443 0.442713 0.259120 0.045533 0.102675 0.367606 0.054320 0.942924 0.180609 0.550983 0.265291 0.321252 -0.830969 4 0
27 0.344525 -1.140315 -0.725453 -0.547965 0.449924 0.303904 1.053624 1.051712 0.509322 0.181611 -0.519979 -1.134490 -1.439105 3 0
28 -0.041565 0.671274 0.195143 0.247294 0.531620 1.050124 0.311358 0.988161 -0.198869 0.387795 1.757366 1.351684 0.194840 4 0
29 0.417845 -1.134173 -0.760709 -0.605264 0.077464 0.533333 1.104524 2.124971 0.083548 0.801730 0.092534 -1.281628 -1.468782 3 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
225 1.532114 -1.060006 -0.434145 -0.999435 -1.259462 0.039140 -0.802013 -0.655286 0.714448 1.005958 -0.086372 0.537392 0.054440 1 1
226 -0.942320 1.172080 0.506725 -0.230675 -0.104635 0.898742 -1.107001 -1.182148 -0.940991 0.232366 1.778224 0.975251 1.731084 0 1
227 1.421974 0.631029 -0.563813 -0.694595 -0.673270 0.929022 0.476907 -1.025173 -0.813644 -0.060006 -0.738730 -0.558099 0.057654 1 1
228 -1.473385 -0.806223 1.849423 -1.252541 0.941013 -0.872947 -1.812392 -0.242718 -0.097212 -0.510500 -0.232195 -0.546399 0.945530 2 1
229 -1.135926 -0.772372 1.164844 -1.022517 0.630202 -0.496999 -1.101656 -0.168921 -0.295159 -0.587401 0.369033 -0.266325 0.604469 2 1
230 -1.085049 0.879566 0.442593 0.128917 0.393498 0.531555 0.392194 1.418515 0.891015 -0.348926 -0.756201 -0.838584 -0.015971 3 1
231 -0.352258 0.556982 0.530520 0.443818 0.300921 0.032128 -0.797384 -0.573532 0.398084 0.328875 -0.274964 -1.300920 0.254456 0 1
232 -1.190363 0.797356 0.758472 0.587917 0.890540 0.471925 0.105793 0.680721 0.230834 -0.150709 -0.816744 -0.470618 0.371198 4 1
233 -0.651003 -0.586618 1.326854 -0.451354 0.507113 0.165474 -0.919675 -0.448249 -1.310940 -1.372737 0.406029 -1.414627 -0.434858 2 1
234 -1.459511 -0.516281 1.631699 -1.141842 0.584621 -0.458541 -1.428877 -0.934556 -0.216455 -0.049794 0.095580 0.387068 0.693730 2 1
235 -0.726984 0.702447 0.798069 -0.320660 0.530902 1.019988 0.144995 0.207847 0.039592 0.220761 0.762941 0.575034 0.671517 4 1
236 -0.300986 -0.404923 0.715406 0.245380 -0.427936 -0.334843 -0.228084 -0.330898 -0.674327 0.199560 0.827455 0.016433 0.866789 0 1
237 -0.736244 0.088611 0.910051 0.437100 0.258256 0.363828 -0.415290 -0.717445 -0.012727 0.436925 -0.786954 -1.217376 0.352825 2 1
238 0.610473 -2.664315 1.303652 -2.022376 1.500032 -1.280926 -1.249533 0.432111 -0.768558 0.291156 -0.092312 0.053770 -0.401166 2 1
239 -2.045424 -2.954642 0.302601 -0.868092 -1.038134 -1.230777 0.514329 0.057591 -1.023895 0.275395 -1.450282 0.386242 0.318763 2 1
240 0.329793 -1.367570 -1.454329 -0.207924 -0.723609 -0.149025 -0.085298 -0.011595 -0.240239 -0.009120 -0.325229 -0.025722 0.114182 1 1
241 -1.919591 1.382172 -0.134161 0.837967 -0.687780 0.944303 -0.258652 -0.742178 0.386031 -1.178099 -1.843543 -0.710556 -0.318561 3 1
242 -2.087669 1.400006 -0.494964 0.451717 -0.759188 0.736625 0.133121 -0.196031 1.121231 0.474128 -0.345937 -0.409324 -0.442069 4 1
243 -2.131652 0.439305 -0.612226 0.854126 -0.494550 0.825299 0.301373 -0.018964 0.690556 -0.078762 -0.709495 -0.075857 -0.418656 4 1
244 -1.611989 -0.756403 -0.410917 1.075909 0.297336 -1.317576 1.115011 -0.467065 -0.768378 1.615499 1.611125 -1.018782 -1.798744 2 1
245 -0.142010 0.000190 -0.063461 -0.506353 -0.386942 -0.256144 0.270621 -1.497417 0.507892 0.456828 -0.431169 -0.978417 0.015849 2 1
246 -1.263975 -1.168117 -1.396090 -0.312016 1.862268 1.400290 0.646060 -0.686864 0.418524 -0.069926 -0.653856 -0.853617 -0.106814 2 1
247 -0.507700 0.899825 1.510153 1.083642 2.081451 0.589016 0.901321 0.658808 0.152596 0.176442 -0.447633 0.287838 0.650479 4 1
248 -0.159768 0.518093 2.197018 0.698491 0.476336 -2.014255 -1.614667 -0.397282 -1.781932 -0.208894 1.650551 -0.771436 -0.987237 0 1
249 -1.037899 1.016712 2.774230 0.665468 -0.385673 0.587263 -0.121609 -0.331379 0.622484 -0.387131 -0.276584 0.218207 1.689216 0 1
250 -0.526923 -1.169944 0.474875 -0.789231 0.369827 -0.537003 -1.089843 -0.173366 -0.023237 -0.142334 0.740065 0.813114 0.872556 2 1
251 -0.770856 -1.024349 -0.019140 -0.097521 0.092703 0.369242 -0.273901 0.190740 -0.074032 0.113055 0.140291 -0.696275 0.166679 2 1
252 -0.905458 -0.790575 0.206164 -0.723816 -0.444860 0.107833 -0.734514 -0.533865 -0.634334 0.320526 0.088428 -0.348210 0.347201 2 1
253 -1.378235 -0.338405 0.016815 -0.394563 0.034043 1.023865 -0.303960 -1.316121 0.198697 0.670577 0.809574 0.580565 0.056004 2 1
254 -0.199959 -2.035812 -0.904507 -1.511975 -0.437843 0.262972 -1.943788 -1.963300 -2.256227 0.354369 -0.039829 0.882325 0.139307 2 1

255 rows × 15 columns

In [103]:
# Cluster composition: count tracks per (chosen, cluster) pair and draw a
# stacked bar chart of not-chosen (0) vs. chosen (1) tracks per cluster.
cluster_counts = X.groupby(['chosen', 'Cluster']).size().reset_index()
counts_by_cluster = cluster_counts.pivot(index='Cluster', columns='chosen', values=0)
counts_by_cluster.loc[:, [0, 1]].plot.bar(stacked=True, figsize=(10, 7))
Out[103]:
<matplotlib.axes._subplots.AxesSubplot at 0x1b8263682e8>
In [104]:
from IPython.display import display, Markdown, Latex
# Render the next company's name as a section header for the report below.
display(Markdown('## '+companies[2]))

Gramma

ANN

In [60]:
X = df_n_ps_std_mfcc[2]
In [61]:
y = df_n_ps[2]['chosen']
In [62]:
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [63]:
X_train.shape
Out[63]:
(162, 13)
In [64]:
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [65]:
# Candidate values for each hyper-parameter explored by the grid search below.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]  # defined but excluded from the search (see next cell)
In [66]:
import time
start = time.time() # current time in seconds since Jan 1, 1970 — reference point for timing the search

np.random.seed(1234)  # fix the RNG so the CV search is repeatable
# Hyper-parameter grid; batch_size was deliberately left out of the search
# (see the commented entry below).
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track both Cohen's kappa and accuracy; refit the best model on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): `iid` was deprecated in scikit-learn 0.22 and removed in 0.24;
# this call fails on newer versions — confirm the pinned sklearn version.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [112]:
# Run the exhaustive grid search (5-fold CV) and report the best configuration
# with its mean accuracy and kappa; also report total wall-clock time.
grid.fit(X_train, y_train)

print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # time after model training finished
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'relu', 'hidden_layer_sizes': (30, 30, 30), 'learning_rate_init': 0.007, 'max_iter': 10}, que permiten obtener un Accuracy de 74.07% y un Kappa del 19.05
Tiempo total: 24.44 minutos
In [69]:
# Re-pin the hyper-parameters found by the (24-minute) grid search so this cell
# can be re-run without refitting. NOTE(review): this overwrites the fitted
# GridSearchCV attribute in place — a plain dict variable would be cleaner.
grid.best_params_={'activation': 'relu', 'hidden_layer_sizes': (30, 30, 30), 'learning_rate_init': 0.007, 'max_iter': 10}
n0=X_train.shape[1]  # input dimension (number of MFCC features)
### hidden_layer_sizes
# Hidden-layer widths copied from the tuned configuration, plus one output unit.
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)
lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [70]:
input_tensor = Input(shape = (n0,))
In [71]:
# Stack the hidden Dense layers; each consumes the previous layer's output.
hidden_outputs = [input_tensor]
for width in ns[:-1]:
    hidden_outputs.append(Dense(width, activation = grid.best_params_['activation'])(hidden_outputs[-1]))

# Single sigmoid unit for binary classification.
classification_output = Dense(ns[-1], activation = 'sigmoid')(hidden_outputs[-1])
In [72]:
# Assemble the model from input to sigmoid output, and snapshot the freshly
# initialized weights so training can later restart from the same point.
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [73]:
model.summary()
Model: "model_3"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_4 (InputLayer)         (None, 13)                0         
_________________________________________________________________
dense_8 (Dense)              (None, 30)                420       
_________________________________________________________________
dense_9 (Dense)              (None, 30)                930       
_________________________________________________________________
dense_10 (Dense)             (None, 30)                930       
_________________________________________________________________
dense_11 (Dense)             (None, 1)                 31        
=================================================================
Total params: 2,311
Trainable params: 2,311
Non-trainable params: 0
_________________________________________________________________
In [74]:
print(epochs)
10
In [75]:
# Restore the initial weights (fresh start), compile with Adam at the tuned
# learning rate, and train, halving the LR when val_accuracy plateaus.
model.set_weights(weights)
# NOTE(review): `lr` is the legacy Keras argument name; newer versions use
# `learning_rate` — confirm against the pinned Keras version.
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), batch_size= 32,
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 162 samples, validate on 54 samples
Epoch 1/10
162/162 [==============================] - 0s 2ms/step - loss: 0.6142 - accuracy: 0.7099 - val_loss: 0.6409 - val_accuracy: 0.6481
Epoch 2/10
162/162 [==============================] - 0s 80us/step - loss: 0.5585 - accuracy: 0.7284 - val_loss: 0.6599 - val_accuracy: 0.6481
Epoch 3/10
162/162 [==============================] - 0s 86us/step - loss: 0.4944 - accuracy: 0.7284 - val_loss: 0.6361 - val_accuracy: 0.6481
Epoch 4/10
162/162 [==============================] - 0s 86us/step - loss: 0.4378 - accuracy: 0.7593 - val_loss: 0.6635 - val_accuracy: 0.6481
Epoch 5/10
162/162 [==============================] - 0s 74us/step - loss: 0.3908 - accuracy: 0.7963 - val_loss: 0.7111 - val_accuracy: 0.7037
Epoch 6/10
162/162 [==============================] - 0s 68us/step - loss: 0.3391 - accuracy: 0.8457 - val_loss: 0.7251 - val_accuracy: 0.6296
Epoch 7/10
162/162 [==============================] - 0s 74us/step - loss: 0.3227 - accuracy: 0.8457 - val_loss: 0.8167 - val_accuracy: 0.6296
Epoch 8/10
162/162 [==============================] - 0s 80us/step - loss: 0.2839 - accuracy: 0.8827 - val_loss: 0.8993 - val_accuracy: 0.7037
Epoch 9/10
162/162 [==============================] - 0s 86us/step - loss: 0.2271 - accuracy: 0.9074 - val_loss: 0.9571 - val_accuracy: 0.6667
Epoch 10/10
162/162 [==============================] - 0s 80us/step - loss: 0.1925 - accuracy: 0.9321 - val_loss: 1.0439 - val_accuracy: 0.6852
In [76]:
# Plot the training history: accuracy and loss curves, training vs. validation.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
range(0, 10)
In [77]:
# Evaluate the trained network on the held-out test split.
evaluation = model.evaluate(X_test, y_test)
test_loss, test_acc = evaluation
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
54/54 [==============================] - 0s 56us/step
test loss: 1.043857611991741, test accuracy: 0.6851851940155029
In [78]:
# Score the raw probability predictions with ROC AUC (threshold-free metric).
y_pred = model.predict(X_test)
auc = roc_auc_score(y_test, y_pred)
print("AUC ROC: ", auc)
AUC ROC:  0.6706766917293233
In [79]:
# Binarize the probabilities at 0.5, then report Cohen's kappa and the confusion matrix.
y_pred = [int(p >= 0.5) for p in y_pred]
print("Kappa: ", cohen_kappa_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
Kappa:  0.20450606585788578
[[32  3]
 [14  5]]

KMeans

In [123]:
X
Out[123]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13
0 -0.674917 0.169246 0.673543 1.157142 -0.633186 0.688145 0.215883 -0.452048 1.101066 0.064017 -0.153703 1.751289 0.812723
1 0.277269 0.514176 0.200398 0.988939 -1.756594 -0.022788 -0.235704 0.523508 -0.604231 1.188209 0.863617 -0.801768 0.229305
2 1.483921 0.724793 0.473099 0.439577 -0.358096 -0.452581 -0.213173 -0.596057 -0.767473 0.696227 -0.111259 -0.370649 -1.325817
3 -0.734008 -0.683844 -0.764866 -0.225060 -0.261235 -0.243429 0.588768 0.874148 1.302526 0.091256 -0.600323 -0.827452 0.390838
4 -0.834815 -0.735908 -1.177596 -0.093532 0.508050 0.503458 1.380798 1.847226 1.227896 0.017729 -0.329325 -0.953249 -0.125917
5 -1.113858 2.313247 2.338424 0.540760 1.997911 -1.396188 0.364160 -0.986730 0.116982 0.425551 -0.453775 0.268916 0.198090
6 -0.049155 1.019548 1.191337 0.701078 0.841417 -0.703997 -0.239014 0.884758 0.480985 -0.626873 0.508075 0.590665 -0.448967
7 -1.022436 0.622744 0.627286 0.206854 0.789895 -0.012269 -0.954108 -0.476224 -0.601871 -0.794474 -0.415181 0.024060 0.299063
8 -0.346421 0.521689 -0.035458 -0.183181 1.029652 -0.422948 0.157648 0.274630 0.353045 0.009969 1.058129 0.259981 -0.021521
9 -0.590254 0.458662 1.067949 -0.147777 0.785110 -0.899815 0.640103 0.578162 0.136024 0.434690 0.528031 0.669310 -0.634947
10 0.067247 0.223802 1.376567 -0.118221 1.654318 0.988767 0.958416 0.691113 0.651209 -1.129401 0.906439 -0.090883 -0.385179
11 -0.662803 -0.425347 -0.906661 0.641527 -1.157491 0.162830 0.428003 0.877985 2.072426 1.810909 0.478599 -0.107073 -0.520160
12 0.207925 1.931905 -1.974903 -2.568754 -2.569685 -1.491997 -0.315430 -0.538287 0.695912 0.084372 0.028695 -0.312255 -0.386997
13 -1.789427 -0.891205 0.045638 1.194730 1.102294 1.155199 0.047810 2.200373 3.017003 1.942547 1.032011 -0.120930 -1.909119
14 -0.780900 0.459108 -0.495743 1.223096 0.707236 -0.551337 -0.861033 -0.793874 1.998119 1.432506 -0.100890 -0.116429 -1.057666
15 -1.034563 1.536681 1.491041 0.109475 0.271871 0.544370 -0.797182 0.119653 1.222894 1.391755 0.392527 0.768908 -0.735165
16 -0.381346 2.241723 0.083946 -1.446368 -0.665699 0.546860 0.837295 0.602989 0.518379 0.326439 -0.970460 0.297350 0.320878
17 -0.011403 -0.783092 0.665912 -0.882711 0.950063 -1.443392 1.799779 -1.077081 0.714995 0.322454 -1.819793 1.682432 -1.532329
18 -1.617916 0.548468 0.040068 0.021068 0.781017 0.992310 0.253640 -0.344738 -0.555918 -0.887811 0.176482 -0.003191 0.622949
19 -0.537412 0.128348 0.610943 -0.633727 0.907081 -0.391947 1.899666 -0.986389 1.249064 1.384333 -2.570699 2.135887 -0.427812
20 0.636803 0.760531 -0.118083 0.109273 -0.805302 0.286324 -0.164900 -0.528374 0.268807 0.626366 0.304320 0.726237 0.570514
21 0.550926 0.936063 -0.865446 0.365123 0.084843 1.251656 1.053639 0.703424 0.100402 0.154559 0.495301 -0.203894 -0.338852
22 0.000462 0.578434 -0.015217 0.442323 -0.153009 0.090108 0.305506 0.584208 0.174651 -0.264088 0.347376 0.636010 0.517943
23 0.083874 -0.293534 -0.485861 0.412663 -0.162491 -0.059849 -0.749250 -0.788638 -1.514498 -0.236889 0.140002 -0.164718 0.611117
24 0.908136 0.233711 -1.075967 0.843554 1.180479 0.233656 -0.393576 0.409481 -0.653717 -1.289551 0.026014 0.824044 0.434511
25 0.265049 0.397559 0.003810 1.195218 -0.144563 -1.172408 0.045381 1.297725 -0.617515 -0.190867 0.048723 0.277567 -0.245827
26 0.419617 0.219257 -1.249181 -0.589066 0.167388 0.746567 -0.263331 -1.081856 -1.507797 -2.415256 -0.536913 1.033723 1.595140
27 -0.407925 -0.502431 -2.226256 -1.581302 -0.733748 -0.615724 0.412099 1.100990 1.504300 -1.163924 -0.737084 0.082677 0.211950
28 -0.250483 -2.083167 -1.449789 -0.233210 -1.132840 -1.114482 -0.838611 1.091598 -0.248949 1.322608 0.272731 -0.720656 -0.338540
29 -1.146888 1.617561 0.885685 2.161755 0.393283 -0.057043 -2.032547 -2.796111 1.361183 1.370199 -1.557977 0.512374 1.394544
... ... ... ... ... ... ... ... ... ... ... ... ... ...
186 -1.007290 -0.872850 -0.374124 -0.254709 -1.117777 -1.026836 0.501806 1.604268 1.418314 -1.021132 -0.287873 -1.781727 -0.747651
187 -0.285121 0.118211 0.478011 0.861493 -1.780510 -0.208885 -1.164306 0.685729 0.357957 -0.226263 1.513265 -0.684050 0.458783
188 0.467571 -1.645745 -0.541507 -1.435183 -0.639321 -0.479558 1.424031 0.792277 0.489618 -1.363609 -1.985852 -1.020878 -0.797356
189 -0.071747 -1.954799 -0.239393 -1.169079 -0.773953 0.589966 1.659844 0.265664 -0.650200 -1.717119 -1.918698 -1.463219 -0.447396
190 0.406083 -2.324572 1.268557 -1.773164 0.220142 -1.551887 1.338403 -0.217135 0.387651 0.321311 -1.062866 -1.614390 -3.086337
191 -0.781221 1.062669 1.057162 0.149736 -0.636136 0.428468 -0.863911 -0.159688 -0.045775 0.330912 0.561954 0.363860 -1.261525
192 0.471597 2.019680 2.368481 -1.171548 -2.104846 -0.670947 -1.217913 0.354069 0.841890 -0.529207 0.300156 1.365068 1.105093
193 -0.786022 -0.229684 0.421340 -0.309174 -0.311318 1.204237 0.287163 -0.259205 -0.106656 0.412345 0.480928 -0.201399 -0.613341
194 0.071267 -0.794887 0.430112 0.394775 -0.746468 0.547360 -0.308259 -0.083362 -0.498384 0.517608 -0.616623 -0.235300 0.616686
195 -0.430296 -0.963686 -0.372758 0.074908 -1.551444 -2.289278 -0.996792 -0.144677 0.236885 0.775027 0.825371 0.497421 -0.293443
196 -0.295903 -1.246841 -0.389881 0.186413 -0.827904 -1.740339 -1.375256 -0.532361 -0.762533 -0.840235 0.492296 0.528822 -0.126196
197 0.479442 -2.526563 -2.233640 -1.311649 -0.114249 0.148620 1.711025 0.415772 -2.224830 -1.067674 1.273042 0.432741 -1.020345
198 -0.472461 -0.316221 -2.118686 -0.390396 -0.247602 -0.668064 -0.201965 -0.506232 -0.902628 -1.005551 0.272601 0.248004 -0.241612
199 -0.006912 -1.450305 -0.125795 -1.995008 -1.440314 -0.787148 1.230185 -1.801522 -0.524097 -0.296890 0.146555 -0.059935 -1.742924
200 -1.045376 0.843938 -1.177992 -1.026041 -0.442183 0.610204 1.906959 0.601365 1.149266 1.040025 1.510768 1.525596 0.008502
201 -1.160197 0.899099 -1.084036 -0.745060 -0.704154 0.815025 1.572385 0.694718 1.195502 1.357973 1.719920 1.339687 0.346437
202 0.846379 -0.292164 -2.285100 -0.590310 -1.342705 -0.365748 -0.736446 1.066510 1.737515 0.368654 0.546029 0.404179 -1.048654
203 0.365059 -0.089335 -0.757014 -0.150651 0.520638 0.383469 1.170308 0.915512 -1.099501 -0.480174 -0.052102 0.105614 -0.010947
204 1.230293 1.978904 0.273786 -0.096460 0.345688 0.887889 -0.409813 -0.373808 0.228442 0.696266 0.267416 0.520666 1.007514
205 -0.042146 0.277253 0.012741 -0.226784 -0.676665 -0.286922 0.799616 -0.357473 -0.730921 0.222981 0.053531 0.062325 0.479153
206 -0.520687 -0.536310 0.241607 -0.135026 0.085343 1.155527 1.524244 -0.261140 -0.801648 0.645204 0.296049 1.047521 0.226221
207 -0.563298 -0.749857 0.723480 0.034849 0.249757 0.248513 0.065331 0.789506 0.591196 0.578037 -0.360289 1.288016 0.495527
208 0.959455 -2.318903 -0.452832 -0.892660 -1.054730 1.504668 -1.691862 -1.997066 -1.458509 0.390715 0.578679 0.194999 -0.104548
209 -0.388117 0.504159 0.113071 0.183221 -0.043271 0.766480 -0.092855 -1.130498 0.529058 -0.199386 0.035560 -0.333834 0.078931
210 0.756581 0.841258 1.471905 -0.630055 -1.095463 3.845135 -0.131687 -1.562759 -0.363967 -0.026532 1.803300 -4.065727 -1.024033
211 0.113169 -0.411080 0.219643 0.091081 -0.447327 -0.962500 -0.624101 -1.077007 0.481174 -0.271821 -0.038339 -0.466845 0.707298
212 0.854842 0.556052 1.002658 0.968377 0.557854 -0.579130 -0.854195 1.514705 1.589294 0.351534 -0.962939 0.326626 0.297482
213 1.087836 -1.091223 1.963519 -0.088209 0.598615 0.937138 -1.807416 2.031932 0.230433 0.443770 -0.751601 0.601455 0.233461
214 -0.595874 0.009907 -0.748990 -0.070101 1.262995 1.821824 0.850657 0.030762 0.529494 -0.624150 -1.080924 -0.189073 0.819318
215 -0.002561 0.856829 -0.237776 -1.449113 -0.069162 0.539513 2.378866 0.711870 -1.305789 -0.778617 -0.872224 -2.333142 -1.454909

216 rows × 13 columns

In [124]:
# Within-cluster sum of squares (inertia) for k = 1..14, used for the elbow plot.
WSSs = [
    KMeans(n_clusters=k, random_state=0).fit(X).inertia_
    for k in range(1, 15)
]
WSSs
Out[124]:
[2808.0,
 2521.584040394968,
 2363.9652951918915,
 2235.4561240552457,
 2121.3700883394513,
 2055.378171433382,
 1978.4853026183964,
 1920.0476199123686,
 1870.5957280462892,
 1801.6400494660393,
 1755.8728853091284,
 1733.9521869345613,
 1708.3667929415192,
 1622.6522007070782]
In [125]:
# Elbow plot: pick the k where the inertia curve flattens.
plt.figure(figsize=(12, 12))
plt.plot(range(1, 15), WSSs)
plt.title('K-means elbow curve (MFCC features)')
plt.xlabel('Number of clusters k')
plt.ylabel('Within-cluster sum of squares (inertia)')
plt.show()
Out[125]:
[<matplotlib.lines.Line2D at 0x1b827a6ff60>]

K = 2 (chosen from the elbow plot above)

In [126]:
# Fit K-means with k=2 (chosen from the elbow plot above) on the MFCC features;
# fixed random_state so the cluster assignment is reproducible.
kmeans_mfcc = KMeans(n_clusters=2, random_state=0, n_init=10)
kmeans_mfcc.fit(X)
Out[126]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=2, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [127]:
kmeans_mfcc.labels_
Out[127]:
array([1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0,
       0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0,
       0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1,
       1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1,
       0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1,
       1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0,
       0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0])
In [128]:
# Cluster assignment for every sample.
# NOTE(review): X is the same data the model was fit on, so this should equal
# kmeans_mfcc.labels_ (shown above) — the extra predict() pass looks redundant;
# verify and reuse labels_ instead.
clusters_mfcc = kmeans_mfcc.predict(X)
clusters_mfcc
Out[128]:
array([1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0,
       0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0,
       0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1,
       1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1,
       0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1,
       1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0,
       0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0])
In [129]:
# NOTE(review): this mutates X in place — the feature matrix displayed earlier
# now carries two extra non-feature columns, and re-running the clustering
# cells on the widened X would change results. Prefer a new frame, e.g.
# X.assign(Cluster=clusters_mfcc, chosen=list(y)).
X.loc[:,'Cluster'] = clusters_mfcc
X.loc[:,'chosen'] = list(y)  # ground-truth label aligned with each row
In [130]:
X
Out[130]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13 Cluster chosen
0 -0.674917 0.169246 0.673543 1.157142 -0.633186 0.688145 0.215883 -0.452048 1.101066 0.064017 -0.153703 1.751289 0.812723 1 0
1 0.277269 0.514176 0.200398 0.988939 -1.756594 -0.022788 -0.235704 0.523508 -0.604231 1.188209 0.863617 -0.801768 0.229305 1 0
2 1.483921 0.724793 0.473099 0.439577 -0.358096 -0.452581 -0.213173 -0.596057 -0.767473 0.696227 -0.111259 -0.370649 -1.325817 0 0
3 -0.734008 -0.683844 -0.764866 -0.225060 -0.261235 -0.243429 0.588768 0.874148 1.302526 0.091256 -0.600323 -0.827452 0.390838 0 0
4 -0.834815 -0.735908 -1.177596 -0.093532 0.508050 0.503458 1.380798 1.847226 1.227896 0.017729 -0.329325 -0.953249 -0.125917 0 0
5 -1.113858 2.313247 2.338424 0.540760 1.997911 -1.396188 0.364160 -0.986730 0.116982 0.425551 -0.453775 0.268916 0.198090 1 0
6 -0.049155 1.019548 1.191337 0.701078 0.841417 -0.703997 -0.239014 0.884758 0.480985 -0.626873 0.508075 0.590665 -0.448967 1 0
7 -1.022436 0.622744 0.627286 0.206854 0.789895 -0.012269 -0.954108 -0.476224 -0.601871 -0.794474 -0.415181 0.024060 0.299063 1 0
8 -0.346421 0.521689 -0.035458 -0.183181 1.029652 -0.422948 0.157648 0.274630 0.353045 0.009969 1.058129 0.259981 -0.021521 1 0
9 -0.590254 0.458662 1.067949 -0.147777 0.785110 -0.899815 0.640103 0.578162 0.136024 0.434690 0.528031 0.669310 -0.634947 1 0
10 0.067247 0.223802 1.376567 -0.118221 1.654318 0.988767 0.958416 0.691113 0.651209 -1.129401 0.906439 -0.090883 -0.385179 1 0
11 -0.662803 -0.425347 -0.906661 0.641527 -1.157491 0.162830 0.428003 0.877985 2.072426 1.810909 0.478599 -0.107073 -0.520160 1 0
12 0.207925 1.931905 -1.974903 -2.568754 -2.569685 -1.491997 -0.315430 -0.538287 0.695912 0.084372 0.028695 -0.312255 -0.386997 0 0
13 -1.789427 -0.891205 0.045638 1.194730 1.102294 1.155199 0.047810 2.200373 3.017003 1.942547 1.032011 -0.120930 -1.909119 1 0
14 -0.780900 0.459108 -0.495743 1.223096 0.707236 -0.551337 -0.861033 -0.793874 1.998119 1.432506 -0.100890 -0.116429 -1.057666 1 0
15 -1.034563 1.536681 1.491041 0.109475 0.271871 0.544370 -0.797182 0.119653 1.222894 1.391755 0.392527 0.768908 -0.735165 1 0
16 -0.381346 2.241723 0.083946 -1.446368 -0.665699 0.546860 0.837295 0.602989 0.518379 0.326439 -0.970460 0.297350 0.320878 1 0
17 -0.011403 -0.783092 0.665912 -0.882711 0.950063 -1.443392 1.799779 -1.077081 0.714995 0.322454 -1.819793 1.682432 -1.532329 0 0
18 -1.617916 0.548468 0.040068 0.021068 0.781017 0.992310 0.253640 -0.344738 -0.555918 -0.887811 0.176482 -0.003191 0.622949 1 0
19 -0.537412 0.128348 0.610943 -0.633727 0.907081 -0.391947 1.899666 -0.986389 1.249064 1.384333 -2.570699 2.135887 -0.427812 1 0
20 0.636803 0.760531 -0.118083 0.109273 -0.805302 0.286324 -0.164900 -0.528374 0.268807 0.626366 0.304320 0.726237 0.570514 1 0
21 0.550926 0.936063 -0.865446 0.365123 0.084843 1.251656 1.053639 0.703424 0.100402 0.154559 0.495301 -0.203894 -0.338852 1 0
22 0.000462 0.578434 -0.015217 0.442323 -0.153009 0.090108 0.305506 0.584208 0.174651 -0.264088 0.347376 0.636010 0.517943 1 0
23 0.083874 -0.293534 -0.485861 0.412663 -0.162491 -0.059849 -0.749250 -0.788638 -1.514498 -0.236889 0.140002 -0.164718 0.611117 1 0
24 0.908136 0.233711 -1.075967 0.843554 1.180479 0.233656 -0.393576 0.409481 -0.653717 -1.289551 0.026014 0.824044 0.434511 1 0
25 0.265049 0.397559 0.003810 1.195218 -0.144563 -1.172408 0.045381 1.297725 -0.617515 -0.190867 0.048723 0.277567 -0.245827 1 0
26 0.419617 0.219257 -1.249181 -0.589066 0.167388 0.746567 -0.263331 -1.081856 -1.507797 -2.415256 -0.536913 1.033723 1.595140 1 0
27 -0.407925 -0.502431 -2.226256 -1.581302 -0.733748 -0.615724 0.412099 1.100990 1.504300 -1.163924 -0.737084 0.082677 0.211950 0 0
28 -0.250483 -2.083167 -1.449789 -0.233210 -1.132840 -1.114482 -0.838611 1.091598 -0.248949 1.322608 0.272731 -0.720656 -0.338540 0 0
29 -1.146888 1.617561 0.885685 2.161755 0.393283 -0.057043 -2.032547 -2.796111 1.361183 1.370199 -1.557977 0.512374 1.394544 1 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
186 -1.007290 -0.872850 -0.374124 -0.254709 -1.117777 -1.026836 0.501806 1.604268 1.418314 -1.021132 -0.287873 -1.781727 -0.747651 0 1
187 -0.285121 0.118211 0.478011 0.861493 -1.780510 -0.208885 -1.164306 0.685729 0.357957 -0.226263 1.513265 -0.684050 0.458783 1 1
188 0.467571 -1.645745 -0.541507 -1.435183 -0.639321 -0.479558 1.424031 0.792277 0.489618 -1.363609 -1.985852 -1.020878 -0.797356 0 1
189 -0.071747 -1.954799 -0.239393 -1.169079 -0.773953 0.589966 1.659844 0.265664 -0.650200 -1.717119 -1.918698 -1.463219 -0.447396 0 1
190 0.406083 -2.324572 1.268557 -1.773164 0.220142 -1.551887 1.338403 -0.217135 0.387651 0.321311 -1.062866 -1.614390 -3.086337 0 1
191 -0.781221 1.062669 1.057162 0.149736 -0.636136 0.428468 -0.863911 -0.159688 -0.045775 0.330912 0.561954 0.363860 -1.261525 1 1
192 0.471597 2.019680 2.368481 -1.171548 -2.104846 -0.670947 -1.217913 0.354069 0.841890 -0.529207 0.300156 1.365068 1.105093 1 1
193 -0.786022 -0.229684 0.421340 -0.309174 -0.311318 1.204237 0.287163 -0.259205 -0.106656 0.412345 0.480928 -0.201399 -0.613341 1 1
194 0.071267 -0.794887 0.430112 0.394775 -0.746468 0.547360 -0.308259 -0.083362 -0.498384 0.517608 -0.616623 -0.235300 0.616686 1 1
195 -0.430296 -0.963686 -0.372758 0.074908 -1.551444 -2.289278 -0.996792 -0.144677 0.236885 0.775027 0.825371 0.497421 -0.293443 0 1
196 -0.295903 -1.246841 -0.389881 0.186413 -0.827904 -1.740339 -1.375256 -0.532361 -0.762533 -0.840235 0.492296 0.528822 -0.126196 0 1
197 0.479442 -2.526563 -2.233640 -1.311649 -0.114249 0.148620 1.711025 0.415772 -2.224830 -1.067674 1.273042 0.432741 -1.020345 0 1
198 -0.472461 -0.316221 -2.118686 -0.390396 -0.247602 -0.668064 -0.201965 -0.506232 -0.902628 -1.005551 0.272601 0.248004 -0.241612 0 1
199 -0.006912 -1.450305 -0.125795 -1.995008 -1.440314 -0.787148 1.230185 -1.801522 -0.524097 -0.296890 0.146555 -0.059935 -1.742924 0 1
200 -1.045376 0.843938 -1.177992 -1.026041 -0.442183 0.610204 1.906959 0.601365 1.149266 1.040025 1.510768 1.525596 0.008502 1 1
201 -1.160197 0.899099 -1.084036 -0.745060 -0.704154 0.815025 1.572385 0.694718 1.195502 1.357973 1.719920 1.339687 0.346437 1 1
202 0.846379 -0.292164 -2.285100 -0.590310 -1.342705 -0.365748 -0.736446 1.066510 1.737515 0.368654 0.546029 0.404179 -1.048654 0 1
203 0.365059 -0.089335 -0.757014 -0.150651 0.520638 0.383469 1.170308 0.915512 -1.099501 -0.480174 -0.052102 0.105614 -0.010947 0 1
204 1.230293 1.978904 0.273786 -0.096460 0.345688 0.887889 -0.409813 -0.373808 0.228442 0.696266 0.267416 0.520666 1.007514 1 1
205 -0.042146 0.277253 0.012741 -0.226784 -0.676665 -0.286922 0.799616 -0.357473 -0.730921 0.222981 0.053531 0.062325 0.479153 1 1
206 -0.520687 -0.536310 0.241607 -0.135026 0.085343 1.155527 1.524244 -0.261140 -0.801648 0.645204 0.296049 1.047521 0.226221 1 1
207 -0.563298 -0.749857 0.723480 0.034849 0.249757 0.248513 0.065331 0.789506 0.591196 0.578037 -0.360289 1.288016 0.495527 1 1
208 0.959455 -2.318903 -0.452832 -0.892660 -1.054730 1.504668 -1.691862 -1.997066 -1.458509 0.390715 0.578679 0.194999 -0.104548 1 1
209 -0.388117 0.504159 0.113071 0.183221 -0.043271 0.766480 -0.092855 -1.130498 0.529058 -0.199386 0.035560 -0.333834 0.078931 1 1
210 0.756581 0.841258 1.471905 -0.630055 -1.095463 3.845135 -0.131687 -1.562759 -0.363967 -0.026532 1.803300 -4.065727 -1.024033 1 1
211 0.113169 -0.411080 0.219643 0.091081 -0.447327 -0.962500 -0.624101 -1.077007 0.481174 -0.271821 -0.038339 -0.466845 0.707298 1 1
212 0.854842 0.556052 1.002658 0.968377 0.557854 -0.579130 -0.854195 1.514705 1.589294 0.351534 -0.962939 0.326626 0.297482 1 1
213 1.087836 -1.091223 1.963519 -0.088209 0.598615 0.937138 -1.807416 2.031932 0.230433 0.443770 -0.751601 0.601455 0.233461 1 1
214 -0.595874 0.009907 -0.748990 -0.070101 1.262995 1.821824 0.850657 0.030762 0.529494 -0.624150 -1.080924 -0.189073 0.819318 1 1
215 -0.002561 0.856829 -0.237776 -1.449113 -0.069162 0.539513 2.378866 0.711870 -1.305789 -0.778617 -0.872224 -2.333142 -1.454909 0 1

216 rows × 15 columns

In [131]:
# Count tracks per (chosen, cluster) pair and draw a stacked bar chart to see
# how well the unsupervised clusters separate chosen vs. not-chosen tracks.
counts = X.groupby(['chosen', 'Cluster']).size().reset_index()
counts_wide = counts.pivot(index='Cluster', columns='chosen', values=0)
counts_wide.loc[:, [0, 1]].plot.bar(stacked=True, figsize=(10, 7))
Out[131]:
<matplotlib.axes._subplots.AxesSubplot at 0x1b827ab1400>
In [132]:
# Render the next company's name as a markdown section heading.
# NOTE(review): imports should live in the notebook's top import cell.
from IPython.display import display, Markdown, Latex
display(Markdown('## '+companies[3]))

Hotel Marrakech

ANN

In [80]:
# Standardized MFCC feature matrix for company index 3 (Hotel Marrakech).
X = df_n_ps_std_mfcc[3]
In [81]:
# Binary target: whether the track was chosen for the playlist.
y = df_n_ps[3]['chosen']
In [82]:
# Hold out the default 25% of the tracks for testing; fixed random_state so the
# split (and every downstream metric) is reproducible on Restart & Run All.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
In [83]:
X_train.shape
Out[83]:
(108, 13)
In [84]:
# Base MLP estimator; hidden_layer_sizes here is only a placeholder —
# GridSearchCV below overrides it through the parameter grid.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [85]:
# Candidate hyper-parameter values for the grid search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]  # defined but excluded from the grid below
In [86]:
import time
start = time.time()  # wall-clock start, used to report total search time below

np.random.seed(1234)  # seed the stochastic parts of the search for reproducibility
# Parameter grid; batch_size is commented out to keep the search tractable.
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track both Cohen's kappa and accuracy; refit the winning model on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): iid= was deprecated in scikit-learn 0.22 and removed in 0.24 —
# drop it when upgrading.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [140]:
# Run the exhaustive search (slow — ~20 minutes on the original machine).
grid.fit(X_train, y_train)

print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time()  # wall-clock time after the search finishes
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'relu', 'hidden_layer_sizes': (20, 10), 'learning_rate_init': 0.004, 'max_iter': 100}, que permiten obtener un Accuracy de 83.33% y un Kappa del 63.49
Tiempo total: 20.45 minutos
In [87]:
# Re-inject the best hyper-parameters found by the grid search above so this
# section can be re-run without repeating the ~20-minute search.
# NOTE(review): overwriting grid.best_params_ on the fitted object is a hack;
# a plain dict variable would be cleaner.
grid.best_params_={'activation': 'relu', 'hidden_layer_sizes': (20, 10), 'learning_rate_init': 0.004, 'max_iter': 100}
n0=X_train.shape[1]  # input dimension (number of MFCC features)
### hidden_layer_sizes
# Layer widths for the Keras model: tuned hidden layers plus one sigmoid output unit.
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)
lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [88]:
input_tensor = Input(shape = (n0,))
In [89]:
# Chain the tuned hidden layers onto the input tensor, keeping every
# intermediate tensor, then finish with a single sigmoid unit for binary output.
hidden_outputs = [input_tensor]
for layer_idx, width in enumerate(ns[:-1]):
    dense = Dense(width, activation = grid.best_params_['activation'])
    hidden_outputs.append(dense(hidden_outputs[layer_idx]))

classification_output = Dense(ns[-1], activation = 'sigmoid')(hidden_outputs[-1])
In [90]:
model = Model([input_tensor], [classification_output])
# Snapshot the freshly-initialized weights so the model can be reset to this
# state before the final fit (restored with set_weights further down).
weights = model.get_weights()
In [91]:
model.summary()
Model: "model_4"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_5 (InputLayer)         (None, 13)                0         
_________________________________________________________________
dense_12 (Dense)             (None, 20)                280       
_________________________________________________________________
dense_13 (Dense)             (None, 10)                210       
_________________________________________________________________
dense_14 (Dense)             (None, 1)                 11        
=================================================================
Total params: 501
Trainable params: 501
Non-trainable params: 0
_________________________________________________________________
In [92]:
# Reset to the initial weights, then train with the tuned learning rate/epochs.
model.set_weights(weights)
adam = keras.optimizers.Adam(lr=lr)  # NOTE(review): lr= is deprecated in newer Keras — use learning_rate=
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Halve the learning rate when validation accuracy plateaus for 10 epochs.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test),
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 108 samples, validate on 36 samples
Epoch 1/100
108/108 [==============================] - 0s 1ms/step - loss: 0.8313 - accuracy: 0.4259 - val_loss: 0.8155 - val_accuracy: 0.4444
Epoch 2/100
108/108 [==============================] - 0s 111us/step - loss: 0.7307 - accuracy: 0.4630 - val_loss: 0.7477 - val_accuracy: 0.4444
Epoch 3/100
108/108 [==============================] - 0s 93us/step - loss: 0.6711 - accuracy: 0.5648 - val_loss: 0.7059 - val_accuracy: 0.5000
Epoch 4/100
108/108 [==============================] - 0s 83us/step - loss: 0.6324 - accuracy: 0.6852 - val_loss: 0.6815 - val_accuracy: 0.4722
Epoch 5/100
108/108 [==============================] - 0s 93us/step - loss: 0.6061 - accuracy: 0.6944 - val_loss: 0.6654 - val_accuracy: 0.5556
Epoch 6/100
108/108 [==============================] - 0s 102us/step - loss: 0.5839 - accuracy: 0.7315 - val_loss: 0.6534 - val_accuracy: 0.5833
Epoch 7/100
108/108 [==============================] - 0s 93us/step - loss: 0.5622 - accuracy: 0.7593 - val_loss: 0.6457 - val_accuracy: 0.5833
Epoch 8/100
108/108 [==============================] - 0s 74us/step - loss: 0.5424 - accuracy: 0.7500 - val_loss: 0.6399 - val_accuracy: 0.6389
Epoch 9/100
108/108 [==============================] - 0s 93us/step - loss: 0.5240 - accuracy: 0.7500 - val_loss: 0.6382 - val_accuracy: 0.6389
Epoch 10/100
108/108 [==============================] - 0s 93us/step - loss: 0.5038 - accuracy: 0.7778 - val_loss: 0.6336 - val_accuracy: 0.6389
Epoch 11/100
108/108 [==============================] - 0s 74us/step - loss: 0.4851 - accuracy: 0.7593 - val_loss: 0.6279 - val_accuracy: 0.6667
Epoch 12/100
108/108 [==============================] - 0s 83us/step - loss: 0.4685 - accuracy: 0.7870 - val_loss: 0.6221 - val_accuracy: 0.6667
Epoch 13/100
108/108 [==============================] - 0s 111us/step - loss: 0.4519 - accuracy: 0.7870 - val_loss: 0.6197 - val_accuracy: 0.6944
Epoch 14/100
108/108 [==============================] - 0s 102us/step - loss: 0.4344 - accuracy: 0.7963 - val_loss: 0.6187 - val_accuracy: 0.6944
Epoch 15/100
108/108 [==============================] - 0s 83us/step - loss: 0.4175 - accuracy: 0.8333 - val_loss: 0.6177 - val_accuracy: 0.6944
Epoch 16/100
108/108 [==============================] - 0s 102us/step - loss: 0.4005 - accuracy: 0.8611 - val_loss: 0.6205 - val_accuracy: 0.6667
Epoch 17/100
108/108 [==============================] - 0s 102us/step - loss: 0.3834 - accuracy: 0.8704 - val_loss: 0.6232 - val_accuracy: 0.6667
Epoch 18/100
108/108 [==============================] - 0s 102us/step - loss: 0.3670 - accuracy: 0.8704 - val_loss: 0.6273 - val_accuracy: 0.6944
Epoch 19/100
108/108 [==============================] - 0s 83us/step - loss: 0.3514 - accuracy: 0.8611 - val_loss: 0.6273 - val_accuracy: 0.6944
Epoch 20/100
108/108 [==============================] - 0s 93us/step - loss: 0.3355 - accuracy: 0.8796 - val_loss: 0.6265 - val_accuracy: 0.6389
Epoch 21/100
108/108 [==============================] - 0s 93us/step - loss: 0.3209 - accuracy: 0.8889 - val_loss: 0.6259 - val_accuracy: 0.6389
Epoch 22/100
108/108 [==============================] - 0s 93us/step - loss: 0.3040 - accuracy: 0.8981 - val_loss: 0.6295 - val_accuracy: 0.6389
Epoch 23/100
108/108 [==============================] - 0s 83us/step - loss: 0.2892 - accuracy: 0.9074 - val_loss: 0.6293 - val_accuracy: 0.6389

Epoch 00023: ReduceLROnPlateau reducing learning rate to 0.0020000000949949026.
Epoch 24/100
108/108 [==============================] - 0s 102us/step - loss: 0.2760 - accuracy: 0.9074 - val_loss: 0.6278 - val_accuracy: 0.6389
Epoch 25/100
108/108 [==============================] - 0s 102us/step - loss: 0.2682 - accuracy: 0.9074 - val_loss: 0.6278 - val_accuracy: 0.6667
Epoch 26/100
108/108 [==============================] - 0s 74us/step - loss: 0.2611 - accuracy: 0.9167 - val_loss: 0.6304 - val_accuracy: 0.6667
Epoch 27/100
108/108 [==============================] - 0s 102us/step - loss: 0.2541 - accuracy: 0.9167 - val_loss: 0.6343 - val_accuracy: 0.6667
Epoch 28/100
108/108 [==============================] - 0s 93us/step - loss: 0.2467 - accuracy: 0.9167 - val_loss: 0.6322 - val_accuracy: 0.6667
Epoch 29/100
108/108 [==============================] - 0s 102us/step - loss: 0.2392 - accuracy: 0.9259 - val_loss: 0.6337 - val_accuracy: 0.6667
Epoch 30/100
108/108 [==============================] - 0s 83us/step - loss: 0.2318 - accuracy: 0.9352 - val_loss: 0.6348 - val_accuracy: 0.6944
Epoch 31/100
108/108 [==============================] - 0s 93us/step - loss: 0.2250 - accuracy: 0.9444 - val_loss: 0.6407 - val_accuracy: 0.6944
Epoch 32/100
108/108 [==============================] - 0s 102us/step - loss: 0.2176 - accuracy: 0.9444 - val_loss: 0.6457 - val_accuracy: 0.6944
Epoch 33/100
108/108 [==============================] - 0s 93us/step - loss: 0.2109 - accuracy: 0.9444 - val_loss: 0.6501 - val_accuracy: 0.6944

Epoch 00033: ReduceLROnPlateau reducing learning rate to 0.0010000000474974513.
Epoch 34/100
108/108 [==============================] - 0s 74us/step - loss: 0.2048 - accuracy: 0.9444 - val_loss: 0.6533 - val_accuracy: 0.6944
Epoch 35/100
108/108 [==============================] - 0s 102us/step - loss: 0.2015 - accuracy: 0.9444 - val_loss: 0.6569 - val_accuracy: 0.6944
Epoch 36/100
108/108 [==============================] - 0s 102us/step - loss: 0.1985 - accuracy: 0.9537 - val_loss: 0.6569 - val_accuracy: 0.6944
Epoch 37/100
108/108 [==============================] - 0s 83us/step - loss: 0.1950 - accuracy: 0.9537 - val_loss: 0.6566 - val_accuracy: 0.6944
Epoch 38/100
108/108 [==============================] - 0s 83us/step - loss: 0.1923 - accuracy: 0.9537 - val_loss: 0.6581 - val_accuracy: 0.6944
Epoch 39/100
108/108 [==============================] - 0s 102us/step - loss: 0.1892 - accuracy: 0.9537 - val_loss: 0.6575 - val_accuracy: 0.6944
Epoch 40/100
108/108 [==============================] - 0s 93us/step - loss: 0.1865 - accuracy: 0.9537 - val_loss: 0.6574 - val_accuracy: 0.6944
Epoch 41/100
108/108 [==============================] - 0s 74us/step - loss: 0.1834 - accuracy: 0.9537 - val_loss: 0.6599 - val_accuracy: 0.6944
Epoch 42/100
108/108 [==============================] - 0s 93us/step - loss: 0.1805 - accuracy: 0.9630 - val_loss: 0.6604 - val_accuracy: 0.6944
Epoch 43/100
108/108 [==============================] - 0s 83us/step - loss: 0.1776 - accuracy: 0.9630 - val_loss: 0.6623 - val_accuracy: 0.6944

Epoch 00043: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.
Epoch 44/100
108/108 [==============================] - 0s 93us/step - loss: 0.1754 - accuracy: 0.9630 - val_loss: 0.6655 - val_accuracy: 0.6944
Epoch 45/100
108/108 [==============================] - 0s 93us/step - loss: 0.1738 - accuracy: 0.9630 - val_loss: 0.6682 - val_accuracy: 0.6944
Epoch 46/100
108/108 [==============================] - 0s 93us/step - loss: 0.1723 - accuracy: 0.9630 - val_loss: 0.6700 - val_accuracy: 0.6944
Epoch 47/100
108/108 [==============================] - 0s 102us/step - loss: 0.1710 - accuracy: 0.9630 - val_loss: 0.6709 - val_accuracy: 0.6944
Epoch 48/100
108/108 [==============================] - 0s 83us/step - loss: 0.1695 - accuracy: 0.9630 - val_loss: 0.6729 - val_accuracy: 0.6944
Epoch 49/100
108/108 [==============================] - 0s 102us/step - loss: 0.1682 - accuracy: 0.9722 - val_loss: 0.6740 - val_accuracy: 0.6944
Epoch 50/100
108/108 [==============================] - 0s 102us/step - loss: 0.1668 - accuracy: 0.9722 - val_loss: 0.6750 - val_accuracy: 0.6944
Epoch 51/100
108/108 [==============================] - 0s 83us/step - loss: 0.1654 - accuracy: 0.9722 - val_loss: 0.6761 - val_accuracy: 0.6944
Epoch 52/100
108/108 [==============================] - 0s 83us/step - loss: 0.1642 - accuracy: 0.9722 - val_loss: 0.6770 - val_accuracy: 0.6944
Epoch 53/100
108/108 [==============================] - 0s 111us/step - loss: 0.1630 - accuracy: 0.9722 - val_loss: 0.6783 - val_accuracy: 0.6944

Epoch 00053: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
Epoch 54/100
108/108 [==============================] - 0s 111us/step - loss: 0.1618 - accuracy: 0.9722 - val_loss: 0.6791 - val_accuracy: 0.6944
Epoch 55/100
108/108 [==============================] - 0s 130us/step - loss: 0.1612 - accuracy: 0.9722 - val_loss: 0.6804 - val_accuracy: 0.6944
Epoch 56/100
108/108 [==============================] - 0s 111us/step - loss: 0.1606 - accuracy: 0.9722 - val_loss: 0.6819 - val_accuracy: 0.6944
Epoch 57/100
108/108 [==============================] - 0s 83us/step - loss: 0.1600 - accuracy: 0.9722 - val_loss: 0.6833 - val_accuracy: 0.6944
Epoch 58/100
108/108 [==============================] - 0s 93us/step - loss: 0.1594 - accuracy: 0.9722 - val_loss: 0.6844 - val_accuracy: 0.6944
Epoch 59/100
108/108 [==============================] - 0s 102us/step - loss: 0.1587 - accuracy: 0.9722 - val_loss: 0.6850 - val_accuracy: 0.6944
Epoch 60/100
108/108 [==============================] - 0s 102us/step - loss: 0.1580 - accuracy: 0.9722 - val_loss: 0.6856 - val_accuracy: 0.6944
Epoch 61/100
108/108 [==============================] - 0s 111us/step - loss: 0.1574 - accuracy: 0.9722 - val_loss: 0.6854 - val_accuracy: 0.6944
Epoch 62/100
108/108 [==============================] - 0s 111us/step - loss: 0.1567 - accuracy: 0.9722 - val_loss: 0.6852 - val_accuracy: 0.6944
Epoch 63/100
108/108 [==============================] - 0s 93us/step - loss: 0.1561 - accuracy: 0.9722 - val_loss: 0.6849 - val_accuracy: 0.6944

Epoch 00063: ReduceLROnPlateau reducing learning rate to 0.0001250000059371814.
Epoch 64/100
108/108 [==============================] - 0s 102us/step - loss: 0.1556 - accuracy: 0.9722 - val_loss: 0.6848 - val_accuracy: 0.6944
Epoch 65/100
108/108 [==============================] - 0s 111us/step - loss: 0.1552 - accuracy: 0.9722 - val_loss: 0.6847 - val_accuracy: 0.6944
Epoch 66/100
108/108 [==============================] - 0s 111us/step - loss: 0.1550 - accuracy: 0.9722 - val_loss: 0.6847 - val_accuracy: 0.6944
Epoch 67/100
108/108 [==============================] - 0s 102us/step - loss: 0.1547 - accuracy: 0.9722 - val_loss: 0.6849 - val_accuracy: 0.6944
Epoch 68/100
108/108 [==============================] - 0s 102us/step - loss: 0.1543 - accuracy: 0.9722 - val_loss: 0.6848 - val_accuracy: 0.6944
Epoch 69/100
108/108 [==============================] - 0s 102us/step - loss: 0.1540 - accuracy: 0.9722 - val_loss: 0.6847 - val_accuracy: 0.6944
Epoch 70/100
108/108 [==============================] - 0s 111us/step - loss: 0.1537 - accuracy: 0.9722 - val_loss: 0.6845 - val_accuracy: 0.6944
Epoch 71/100
108/108 [==============================] - 0s 102us/step - loss: 0.1534 - accuracy: 0.9722 - val_loss: 0.6849 - val_accuracy: 0.6944
Epoch 72/100
108/108 [==============================] - 0s 93us/step - loss: 0.1531 - accuracy: 0.9722 - val_loss: 0.6852 - val_accuracy: 0.6944
Epoch 73/100
108/108 [==============================] - 0s 83us/step - loss: 0.1528 - accuracy: 0.9722 - val_loss: 0.6855 - val_accuracy: 0.6944

Epoch 00073: ReduceLROnPlateau reducing learning rate to 6.25000029685907e-05.
Epoch 74/100
108/108 [==============================] - 0s 93us/step - loss: 0.1525 - accuracy: 0.9722 - val_loss: 0.6857 - val_accuracy: 0.6944
Epoch 75/100
108/108 [==============================] - 0s 102us/step - loss: 0.1523 - accuracy: 0.9722 - val_loss: 0.6858 - val_accuracy: 0.6944
Epoch 76/100
108/108 [==============================] - 0s 102us/step - loss: 0.1522 - accuracy: 0.9722 - val_loss: 0.6857 - val_accuracy: 0.6944
Epoch 77/100
108/108 [==============================] - 0s 93us/step - loss: 0.1521 - accuracy: 0.9722 - val_loss: 0.6858 - val_accuracy: 0.6944
Epoch 78/100
108/108 [==============================] - 0s 102us/step - loss: 0.1519 - accuracy: 0.9722 - val_loss: 0.6859 - val_accuracy: 0.6944
Epoch 79/100
108/108 [==============================] - 0s 93us/step - loss: 0.1517 - accuracy: 0.9722 - val_loss: 0.6863 - val_accuracy: 0.6944
Epoch 80/100
108/108 [==============================] - 0s 102us/step - loss: 0.1516 - accuracy: 0.9722 - val_loss: 0.6866 - val_accuracy: 0.6944
Epoch 81/100
108/108 [==============================] - 0s 83us/step - loss: 0.1514 - accuracy: 0.9722 - val_loss: 0.6869 - val_accuracy: 0.6944
Epoch 82/100
108/108 [==============================] - 0s 93us/step - loss: 0.1513 - accuracy: 0.9722 - val_loss: 0.6874 - val_accuracy: 0.6944
Epoch 83/100
108/108 [==============================] - 0s 93us/step - loss: 0.1511 - accuracy: 0.9722 - val_loss: 0.6877 - val_accuracy: 0.6944

Epoch 00083: ReduceLROnPlateau reducing learning rate to 3.125000148429535e-05.
Epoch 84/100
108/108 [==============================] - 0s 83us/step - loss: 0.1510 - accuracy: 0.9722 - val_loss: 0.6879 - val_accuracy: 0.6944
Epoch 85/100
108/108 [==============================] - 0s 83us/step - loss: 0.1509 - accuracy: 0.9722 - val_loss: 0.6880 - val_accuracy: 0.6944
Epoch 86/100
108/108 [==============================] - 0s 93us/step - loss: 0.1508 - accuracy: 0.9722 - val_loss: 0.6881 - val_accuracy: 0.6944
Epoch 87/100
108/108 [==============================] - 0s 83us/step - loss: 0.1508 - accuracy: 0.9722 - val_loss: 0.6882 - val_accuracy: 0.6944
Epoch 88/100
108/108 [==============================] - 0s 74us/step - loss: 0.1507 - accuracy: 0.9722 - val_loss: 0.6883 - val_accuracy: 0.6944
Epoch 89/100
108/108 [==============================] - 0s 102us/step - loss: 0.1506 - accuracy: 0.9722 - val_loss: 0.6883 - val_accuracy: 0.6944
Epoch 90/100
108/108 [==============================] - 0s 93us/step - loss: 0.1505 - accuracy: 0.9722 - val_loss: 0.6884 - val_accuracy: 0.6944
Epoch 91/100
108/108 [==============================] - 0s 83us/step - loss: 0.1505 - accuracy: 0.9722 - val_loss: 0.6884 - val_accuracy: 0.6944
Epoch 92/100
108/108 [==============================] - 0s 111us/step - loss: 0.1504 - accuracy: 0.9722 - val_loss: 0.6886 - val_accuracy: 0.6944
Epoch 93/100
108/108 [==============================] - 0s 93us/step - loss: 0.1503 - accuracy: 0.9722 - val_loss: 0.6888 - val_accuracy: 0.6944

Epoch 00093: ReduceLROnPlateau reducing learning rate to 1.5625000742147677e-05.
Epoch 94/100
108/108 [==============================] - 0s 102us/step - loss: 0.1502 - accuracy: 0.9722 - val_loss: 0.6889 - val_accuracy: 0.6944
Epoch 95/100
108/108 [==============================] - 0s 83us/step - loss: 0.1502 - accuracy: 0.9722 - val_loss: 0.6890 - val_accuracy: 0.6944
Epoch 96/100
108/108 [==============================] - 0s 111us/step - loss: 0.1502 - accuracy: 0.9722 - val_loss: 0.6891 - val_accuracy: 0.6944
Epoch 97/100
108/108 [==============================] - 0s 102us/step - loss: 0.1501 - accuracy: 0.9722 - val_loss: 0.6892 - val_accuracy: 0.6944
Epoch 98/100
108/108 [==============================] - 0s 83us/step - loss: 0.1501 - accuracy: 0.9722 - val_loss: 0.6893 - val_accuracy: 0.6944
Epoch 99/100
108/108 [==============================] - 0s 83us/step - loss: 0.1500 - accuracy: 0.9722 - val_loss: 0.6893 - val_accuracy: 0.6944
Epoch 100/100
108/108 [==============================] - 0s 93us/step - loss: 0.1500 - accuracy: 0.9722 - val_loss: 0.6894 - val_accuracy: 0.6944
In [93]:
# Plot the Keras training history: accuracy and loss curves for the
# training and validation sets.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# One x-axis point per completed epoch.
epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
range(0, 100)
In [94]:
# Score the trained network on the held-out test split.
test_loss, test_acc = model.evaluate(X_test, y_test)
print(f"test loss: {test_loss}, test accuracy: {test_acc}")
36/36 [==============================] - 0s 83us/step
test loss: 0.6894227663675944, test accuracy: 0.6944444179534912
In [95]:
# Raw sigmoid outputs (scores) for the test set; ROC AUC is
# threshold-independent, so it is computed on the scores directly.
y_pred = model.predict(X_test)
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
AUC ROC:  0.7725752508361204
In [96]:
# Binarise the predicted scores at the 0.5 threshold, then report
# Cohen's kappa and the confusion matrix against the true labels.
y_pred = [int(score >= 0.5) for score in y_pred]
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
Kappa:  0.38888888888888884
[[15  8]
 [ 3 10]]

KMeans

In [151]:
X
Out[151]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13
0 0.221235 1.617887 0.929874 -0.231486 -0.525862 1.384826 0.709441 0.512679 -2.231286 -2.278872 -0.728806 -2.187766 -1.206544
1 0.836735 -0.529605 -1.268139 -0.791053 0.815880 -1.992230 -0.371430 -0.356669 1.323871 0.946394 -1.085097 0.673490 -1.496313
2 -0.190995 1.202756 0.050028 -2.631154 3.701544 -1.158173 0.439586 2.317548 -2.282526 -1.571775 -2.541951 -2.587380 -2.132445
3 0.521202 1.354284 1.423683 -0.634173 0.934734 0.214772 -0.349135 1.009101 -2.193012 -0.301254 -0.356046 -0.668937 -0.421263
4 0.250234 1.586078 -1.791096 0.127156 1.573000 0.288525 1.962471 1.500627 1.352853 -1.921935 0.705405 -0.230103 -0.803009
5 0.333238 -0.983017 -1.253129 -0.703445 -0.390741 -0.904476 -0.271254 1.452321 -0.581049 0.023331 -1.113368 1.679210 1.637860
6 0.124335 -1.665835 -2.227171 -0.835611 0.574827 -2.080147 -0.569831 1.427555 0.050593 0.046984 0.165593 2.377458 0.856636
7 -0.874325 -0.031806 0.246893 0.344556 0.649466 0.670946 0.058839 -0.056305 -0.189911 -0.166885 0.518138 -0.045234 -0.062791
8 0.326792 -0.610166 -0.550927 -0.576489 -0.918038 -0.331125 -0.592815 0.924677 0.042034 0.230600 -0.251229 0.688556 1.249983
9 -1.469326 0.426500 2.520170 1.045079 -0.410299 0.561905 0.502913 -0.074933 -0.194040 -0.642182 -0.535037 -0.277243 0.886176
10 -1.602658 -1.245330 -1.650120 1.696521 -0.387907 -0.033057 -1.456368 0.024357 1.001153 1.565037 -0.646889 0.239423 0.993436
11 -1.820295 -1.526937 -1.324778 2.559842 -0.126763 -0.206232 -1.800200 0.194869 0.807199 0.370292 -0.668713 0.417022 1.455716
12 -0.764909 0.894911 0.268560 -0.209448 -0.230305 -0.132543 0.157299 -0.216200 -0.073587 0.425168 1.208283 0.176103 -0.227052
13 -2.021592 -0.256925 0.107447 0.355253 1.242070 -0.097325 0.575987 0.181357 -0.173788 0.312045 -1.135063 -1.669265 -1.430807
14 -0.561508 0.034689 -0.118674 0.135424 -0.766656 -0.507684 0.108448 0.723647 0.198360 -0.010792 0.011033 -0.011280 -1.011492
15 1.081988 0.945357 -0.230748 0.314322 0.133962 0.296502 0.325135 -0.722499 -0.700028 -0.027093 0.102971 -0.815814 0.889176
16 0.341450 3.113983 0.419314 1.087397 2.159626 0.542690 0.106593 0.433048 -0.130652 -0.483111 0.378798 0.475149 0.814020
17 -0.465680 0.048325 -1.647951 -1.343412 0.783737 1.435160 -0.831385 0.665605 0.805481 1.237871 0.437312 0.293056 -0.077970
18 -0.518957 -0.205911 -0.935801 -0.519162 0.488287 0.429763 0.957373 0.025670 -0.655265 -0.541455 1.027838 0.655340 0.929453
19 0.065304 0.074045 0.004340 0.740126 0.742134 0.291814 -0.940237 -0.030565 2.291217 0.873108 0.369910 0.824246 0.356735
20 -0.383757 0.437022 0.907339 0.842096 1.161748 0.721193 0.231956 -0.806816 -0.441393 -0.075681 0.273756 0.148276 0.377982
21 0.558290 1.010237 -0.882410 -0.313261 -1.108460 -0.061251 1.439980 0.042899 -0.731331 -1.580437 -1.114403 -1.504838 -0.119322
22 0.470380 -0.322318 -1.191863 -0.092570 0.408703 -1.017213 0.435319 0.248928 -0.094380 0.418018 0.217435 -0.516998 -0.868055
23 0.594195 0.466425 -1.651421 -1.153236 -2.163553 -1.957716 -0.238416 0.695815 0.332270 -0.061240 -0.338001 1.191380 0.653576
24 -0.294279 -0.469828 -0.506281 0.553411 -0.002394 -1.210177 -1.635390 -0.068027 -0.072194 -0.646855 -0.472007 0.640983 1.465438
25 -0.044753 0.008070 0.124675 0.401165 -1.495260 -2.095123 -1.070614 0.040115 -0.592197 -0.491126 0.440431 1.007611 0.450109
26 -0.361795 -0.336369 -1.184798 0.051223 0.460059 -1.947295 -2.267630 0.341965 0.234544 0.029052 0.004198 0.189779 1.146805
27 0.842916 0.418905 -0.554491 0.388861 1.276091 0.351522 0.411002 -0.949650 -0.431041 0.317882 -0.888404 -0.897541 -0.085071
28 -1.213666 1.250188 -1.066058 -0.663600 1.489036 0.477183 0.408848 0.354581 -0.567870 0.781751 -0.085926 2.017666 0.067778
29 -1.003139 0.015927 -0.366063 0.248303 0.256190 0.748222 1.200549 0.305672 -0.124149 -0.260150 -0.065036 0.009866 -0.589000
... ... ... ... ... ... ... ... ... ... ... ... ... ...
114 0.145143 -1.087536 -0.109574 1.182367 -0.234880 0.595609 -0.167528 -0.375039 -0.229571 -0.013944 0.570663 -0.431994 0.150206
115 -1.711163 -0.746315 -0.531486 -0.426122 0.265388 0.755026 0.557866 -0.376462 0.447626 -1.092869 -1.345461 -1.913386 -0.678209
116 0.392290 1.069548 -0.306184 0.145336 -0.217734 -0.733749 0.435935 0.749464 1.217672 0.211105 1.231701 0.188785 0.434754
117 0.935840 0.716917 0.007000 0.627264 0.191403 -0.333462 0.823069 0.058921 0.011376 0.852634 0.522502 0.556864 0.607905
118 0.197008 0.584804 -0.002674 0.755814 0.284618 -1.252463 0.407862 0.894361 0.769537 0.220578 0.749417 0.100243 -0.151070
119 2.405490 -0.155599 0.811293 1.347936 0.825563 0.461353 0.894124 0.344794 1.893152 2.258728 0.129193 -0.243084 -0.058467
120 0.715503 0.871839 0.292274 0.271363 -0.696526 0.777498 2.464116 -0.386285 1.685524 1.576706 0.185429 0.140475 0.421924
121 2.417611 0.241031 1.233666 0.460035 0.057428 0.040149 3.378156 0.381120 -0.121501 -0.324116 -0.176822 1.227364 0.614724
122 -0.318929 -0.810559 -0.844588 0.201697 -0.001562 0.245109 0.080448 -0.549388 1.103198 0.291492 0.110564 -0.673124 -1.460988
123 0.103167 -0.475246 0.116339 -0.525138 -0.644659 0.818829 0.559379 -0.142633 -0.157352 -0.469393 0.012773 -0.547972 -0.137509
124 -0.094617 -0.605067 -0.742357 0.268847 0.524751 0.458324 0.276908 -0.302065 0.138633 0.644511 0.740681 -0.411278 -0.888780
125 -1.595914 -0.979410 -0.416646 0.594734 -0.542828 0.792587 -1.964236 -0.008409 -0.175168 0.129402 -0.381751 0.557976 0.657622
126 -1.902096 -0.148093 -0.337830 -0.273661 -0.493964 -0.088475 -0.220561 -0.292045 -0.103285 0.166066 0.662068 0.766244 0.425791
127 -1.605568 -0.554121 0.460876 1.019950 -0.718272 1.457022 -1.697711 0.811490 -0.088705 -0.660478 -0.973868 0.581804 0.414185
128 0.566475 -0.266552 -0.411015 -1.148969 0.130007 -0.378875 -0.472183 -1.264777 0.333529 -0.198779 0.021059 -0.888067 0.082910
129 0.526340 -0.028603 -0.164620 -1.269989 0.446406 -0.087302 -0.000812 -1.253016 -0.043573 -0.441529 -0.260177 -1.155496 0.048018
130 -0.506596 -0.917555 0.022348 -2.928480 -0.397810 1.517764 -2.223896 -3.454969 -0.250982 2.385778 2.673937 0.852384 0.503967
131 0.241077 0.331430 -0.183360 0.450777 1.050602 1.030703 -0.632082 -0.613020 -0.605923 -0.445150 0.601823 -0.068405 -0.239919
132 0.384872 0.341763 0.211728 -0.437357 1.099689 2.173078 -0.523080 0.221203 1.791178 -0.924414 0.882953 -0.116331 -0.105512
133 0.926004 -0.715730 0.121647 -0.799985 -1.043066 -1.309977 0.089931 0.501863 1.200490 0.946479 0.949711 0.122093 -0.106795
134 0.108168 -0.201129 -0.571770 -0.710757 -0.229892 -0.308178 1.035292 0.063963 -0.410105 0.116893 1.018084 -0.213796 0.002336
135 -0.243379 0.039038 -0.185527 0.341086 0.733620 -1.255089 0.422881 0.253117 1.161221 0.252360 0.377116 -0.444191 0.208913
136 1.306831 0.220282 -0.386785 -1.153056 1.946157 1.302925 -1.248915 -0.883391 -0.265715 1.982123 1.319726 0.442799 1.500702
137 1.247354 0.605285 -0.077094 0.407727 1.841346 -0.789475 -1.501921 0.627200 1.160569 -0.001277 -0.516474 0.653115 1.248304
138 0.708935 -1.127949 0.861675 -1.147026 -0.241837 -1.657997 -0.127631 2.115046 -0.340085 1.759678 -0.965027 1.205391 0.879613
139 0.708839 -0.956820 -1.350334 -1.562796 0.707567 -0.389784 -2.197459 2.741627 1.474984 -0.351995 1.728271 1.249063 0.769635
140 1.483186 0.682555 -0.612419 0.474508 0.910933 1.248228 0.176444 0.490360 1.208370 -0.434267 -0.260485 -0.319060 0.067114
141 0.201126 0.561986 0.031425 0.091477 0.565265 1.039870 0.116405 -0.484695 -1.094180 -1.320674 0.773891 0.702095 0.386850
142 0.003924 -1.301791 -0.501001 1.193249 -0.495635 0.002788 -1.372557 -0.559755 -0.221626 -0.659689 0.093759 -0.130319 0.035051
143 -0.055580 -0.659181 -1.244063 -0.214578 0.114523 -0.633686 -0.516044 0.580491 1.035424 0.468185 0.471840 1.199764 0.150708

144 rows × 13 columns

In [152]:
# Within-cluster sum of squares (k-means inertia) for k = 1..14,
# collected for the elbow-method plot below. KMeans.fit returns the
# fitted estimator, so the inertia can be read off the chained call.
WSSs = [KMeans(n_clusters=k, random_state=0).fit(X).inertia_ for k in range(1, 15)]
WSSs
Out[152]:
[1872.0,
 1679.2968315149515,
 1550.7545737855435,
 1446.6172108036508,
 1373.1093324372473,
 1322.1800157528337,
 1260.9432250958696,
 1220.6940995550194,
 1171.7851814633127,
 1127.850987607133,
 1106.05463123734,
 1024.1042711193513,
 1004.7449935321633,
 976.292238260163]
In [153]:
# Elbow plot: inertia versus number of clusters.
fig, ax = plt.subplots(figsize=(12, 12))
ax.plot(range(1, 15), WSSs)
Out[153]:
[<matplotlib.lines.Line2D at 0x1b82940b0b8>]

K=3

In [154]:
# Fit k-means with k=3 (chosen from the elbow plot above) on the MFCC
# features; random_state fixed for a reproducible clustering.
kmeans_mfcc = KMeans(n_clusters=3, random_state=0, n_init=10)
kmeans_mfcc.fit(X)
Out[154]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=3, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [155]:
kmeans_mfcc.labels_
Out[155]:
array([2, 0, 0, 2, 2, 1, 1, 2, 1, 2, 1, 1, 1, 0, 0, 2, 2, 0, 1, 2, 2, 0,
       0, 1, 1, 1, 1, 2, 1, 0, 1, 0, 0, 2, 0, 1, 1, 0, 2, 1, 0, 0, 0, 1,
       1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 2, 1, 0, 2, 2, 0, 2, 0, 2, 1,
       0, 0, 1, 1, 1, 1, 0, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 2, 1, 2,
       0, 2, 2, 2, 1, 2, 1, 2, 2, 0, 1, 1, 1, 0, 0, 0, 0, 2, 0, 0, 0, 2,
       0, 2, 2, 0, 2, 0, 2, 2, 2, 2, 2, 2, 0, 0, 0, 1, 1, 1, 0, 0, 1, 2,
       2, 0, 0, 0, 2, 2, 1, 1, 2, 2, 1, 1])
In [156]:
# Assign each row to its nearest centroid; on the training data this
# reproduces labels_ (the two arrays shown above/below are identical).
clusters_mfcc = kmeans_mfcc.predict(X)
clusters_mfcc
Out[156]:
array([2, 0, 0, 2, 2, 1, 1, 2, 1, 2, 1, 1, 1, 0, 0, 2, 2, 0, 1, 2, 2, 0,
       0, 1, 1, 1, 1, 2, 1, 0, 1, 0, 0, 2, 0, 1, 1, 0, 2, 1, 0, 0, 0, 1,
       1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 2, 1, 0, 2, 2, 0, 2, 0, 2, 1,
       0, 0, 1, 1, 1, 1, 0, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 2, 1, 2,
       0, 2, 2, 2, 1, 2, 1, 2, 2, 0, 1, 1, 1, 0, 0, 0, 0, 2, 0, 0, 0, 2,
       0, 2, 2, 0, 2, 0, 2, 2, 2, 2, 2, 2, 0, 0, 0, 1, 1, 1, 0, 0, 1, 2,
       2, 0, 0, 0, 2, 2, 1, 1, 2, 2, 1, 1])
In [157]:
# Attach the cluster assignment and the target label to the feature
# frame so the two can be cross-tabulated below. `.values` (like the
# original list(y)) assigns positionally, ignoring y's index.
X['Cluster'] = clusters_mfcc
X['chosen'] = y.values
In [158]:
X
Out[158]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13 Cluster chosen
0 0.221235 1.617887 0.929874 -0.231486 -0.525862 1.384826 0.709441 0.512679 -2.231286 -2.278872 -0.728806 -2.187766 -1.206544 2 0
1 0.836735 -0.529605 -1.268139 -0.791053 0.815880 -1.992230 -0.371430 -0.356669 1.323871 0.946394 -1.085097 0.673490 -1.496313 0 0
2 -0.190995 1.202756 0.050028 -2.631154 3.701544 -1.158173 0.439586 2.317548 -2.282526 -1.571775 -2.541951 -2.587380 -2.132445 0 0
3 0.521202 1.354284 1.423683 -0.634173 0.934734 0.214772 -0.349135 1.009101 -2.193012 -0.301254 -0.356046 -0.668937 -0.421263 2 0
4 0.250234 1.586078 -1.791096 0.127156 1.573000 0.288525 1.962471 1.500627 1.352853 -1.921935 0.705405 -0.230103 -0.803009 2 0
5 0.333238 -0.983017 -1.253129 -0.703445 -0.390741 -0.904476 -0.271254 1.452321 -0.581049 0.023331 -1.113368 1.679210 1.637860 1 0
6 0.124335 -1.665835 -2.227171 -0.835611 0.574827 -2.080147 -0.569831 1.427555 0.050593 0.046984 0.165593 2.377458 0.856636 1 0
7 -0.874325 -0.031806 0.246893 0.344556 0.649466 0.670946 0.058839 -0.056305 -0.189911 -0.166885 0.518138 -0.045234 -0.062791 2 0
8 0.326792 -0.610166 -0.550927 -0.576489 -0.918038 -0.331125 -0.592815 0.924677 0.042034 0.230600 -0.251229 0.688556 1.249983 1 0
9 -1.469326 0.426500 2.520170 1.045079 -0.410299 0.561905 0.502913 -0.074933 -0.194040 -0.642182 -0.535037 -0.277243 0.886176 2 0
10 -1.602658 -1.245330 -1.650120 1.696521 -0.387907 -0.033057 -1.456368 0.024357 1.001153 1.565037 -0.646889 0.239423 0.993436 1 0
11 -1.820295 -1.526937 -1.324778 2.559842 -0.126763 -0.206232 -1.800200 0.194869 0.807199 0.370292 -0.668713 0.417022 1.455716 1 0
12 -0.764909 0.894911 0.268560 -0.209448 -0.230305 -0.132543 0.157299 -0.216200 -0.073587 0.425168 1.208283 0.176103 -0.227052 1 0
13 -2.021592 -0.256925 0.107447 0.355253 1.242070 -0.097325 0.575987 0.181357 -0.173788 0.312045 -1.135063 -1.669265 -1.430807 0 0
14 -0.561508 0.034689 -0.118674 0.135424 -0.766656 -0.507684 0.108448 0.723647 0.198360 -0.010792 0.011033 -0.011280 -1.011492 0 0
15 1.081988 0.945357 -0.230748 0.314322 0.133962 0.296502 0.325135 -0.722499 -0.700028 -0.027093 0.102971 -0.815814 0.889176 2 0
16 0.341450 3.113983 0.419314 1.087397 2.159626 0.542690 0.106593 0.433048 -0.130652 -0.483111 0.378798 0.475149 0.814020 2 0
17 -0.465680 0.048325 -1.647951 -1.343412 0.783737 1.435160 -0.831385 0.665605 0.805481 1.237871 0.437312 0.293056 -0.077970 0 0
18 -0.518957 -0.205911 -0.935801 -0.519162 0.488287 0.429763 0.957373 0.025670 -0.655265 -0.541455 1.027838 0.655340 0.929453 1 0
19 0.065304 0.074045 0.004340 0.740126 0.742134 0.291814 -0.940237 -0.030565 2.291217 0.873108 0.369910 0.824246 0.356735 2 0
20 -0.383757 0.437022 0.907339 0.842096 1.161748 0.721193 0.231956 -0.806816 -0.441393 -0.075681 0.273756 0.148276 0.377982 2 0
21 0.558290 1.010237 -0.882410 -0.313261 -1.108460 -0.061251 1.439980 0.042899 -0.731331 -1.580437 -1.114403 -1.504838 -0.119322 0 0
22 0.470380 -0.322318 -1.191863 -0.092570 0.408703 -1.017213 0.435319 0.248928 -0.094380 0.418018 0.217435 -0.516998 -0.868055 0 0
23 0.594195 0.466425 -1.651421 -1.153236 -2.163553 -1.957716 -0.238416 0.695815 0.332270 -0.061240 -0.338001 1.191380 0.653576 1 0
24 -0.294279 -0.469828 -0.506281 0.553411 -0.002394 -1.210177 -1.635390 -0.068027 -0.072194 -0.646855 -0.472007 0.640983 1.465438 1 0
25 -0.044753 0.008070 0.124675 0.401165 -1.495260 -2.095123 -1.070614 0.040115 -0.592197 -0.491126 0.440431 1.007611 0.450109 1 0
26 -0.361795 -0.336369 -1.184798 0.051223 0.460059 -1.947295 -2.267630 0.341965 0.234544 0.029052 0.004198 0.189779 1.146805 1 0
27 0.842916 0.418905 -0.554491 0.388861 1.276091 0.351522 0.411002 -0.949650 -0.431041 0.317882 -0.888404 -0.897541 -0.085071 2 0
28 -1.213666 1.250188 -1.066058 -0.663600 1.489036 0.477183 0.408848 0.354581 -0.567870 0.781751 -0.085926 2.017666 0.067778 1 0
29 -1.003139 0.015927 -0.366063 0.248303 0.256190 0.748222 1.200549 0.305672 -0.124149 -0.260150 -0.065036 0.009866 -0.589000 0 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
114 0.145143 -1.087536 -0.109574 1.182367 -0.234880 0.595609 -0.167528 -0.375039 -0.229571 -0.013944 0.570663 -0.431994 0.150206 2 1
115 -1.711163 -0.746315 -0.531486 -0.426122 0.265388 0.755026 0.557866 -0.376462 0.447626 -1.092869 -1.345461 -1.913386 -0.678209 0 1
116 0.392290 1.069548 -0.306184 0.145336 -0.217734 -0.733749 0.435935 0.749464 1.217672 0.211105 1.231701 0.188785 0.434754 2 1
117 0.935840 0.716917 0.007000 0.627264 0.191403 -0.333462 0.823069 0.058921 0.011376 0.852634 0.522502 0.556864 0.607905 2 1
118 0.197008 0.584804 -0.002674 0.755814 0.284618 -1.252463 0.407862 0.894361 0.769537 0.220578 0.749417 0.100243 -0.151070 2 1
119 2.405490 -0.155599 0.811293 1.347936 0.825563 0.461353 0.894124 0.344794 1.893152 2.258728 0.129193 -0.243084 -0.058467 2 1
120 0.715503 0.871839 0.292274 0.271363 -0.696526 0.777498 2.464116 -0.386285 1.685524 1.576706 0.185429 0.140475 0.421924 2 1
121 2.417611 0.241031 1.233666 0.460035 0.057428 0.040149 3.378156 0.381120 -0.121501 -0.324116 -0.176822 1.227364 0.614724 2 1
122 -0.318929 -0.810559 -0.844588 0.201697 -0.001562 0.245109 0.080448 -0.549388 1.103198 0.291492 0.110564 -0.673124 -1.460988 0 1
123 0.103167 -0.475246 0.116339 -0.525138 -0.644659 0.818829 0.559379 -0.142633 -0.157352 -0.469393 0.012773 -0.547972 -0.137509 0 1
124 -0.094617 -0.605067 -0.742357 0.268847 0.524751 0.458324 0.276908 -0.302065 0.138633 0.644511 0.740681 -0.411278 -0.888780 0 1
125 -1.595914 -0.979410 -0.416646 0.594734 -0.542828 0.792587 -1.964236 -0.008409 -0.175168 0.129402 -0.381751 0.557976 0.657622 1 1
126 -1.902096 -0.148093 -0.337830 -0.273661 -0.493964 -0.088475 -0.220561 -0.292045 -0.103285 0.166066 0.662068 0.766244 0.425791 1 1
127 -1.605568 -0.554121 0.460876 1.019950 -0.718272 1.457022 -1.697711 0.811490 -0.088705 -0.660478 -0.973868 0.581804 0.414185 1 1
128 0.566475 -0.266552 -0.411015 -1.148969 0.130007 -0.378875 -0.472183 -1.264777 0.333529 -0.198779 0.021059 -0.888067 0.082910 0 1
129 0.526340 -0.028603 -0.164620 -1.269989 0.446406 -0.087302 -0.000812 -1.253016 -0.043573 -0.441529 -0.260177 -1.155496 0.048018 0 1
130 -0.506596 -0.917555 0.022348 -2.928480 -0.397810 1.517764 -2.223896 -3.454969 -0.250982 2.385778 2.673937 0.852384 0.503967 1 1
131 0.241077 0.331430 -0.183360 0.450777 1.050602 1.030703 -0.632082 -0.613020 -0.605923 -0.445150 0.601823 -0.068405 -0.239919 2 1
132 0.384872 0.341763 0.211728 -0.437357 1.099689 2.173078 -0.523080 0.221203 1.791178 -0.924414 0.882953 -0.116331 -0.105512 2 1
133 0.926004 -0.715730 0.121647 -0.799985 -1.043066 -1.309977 0.089931 0.501863 1.200490 0.946479 0.949711 0.122093 -0.106795 0 1
134 0.108168 -0.201129 -0.571770 -0.710757 -0.229892 -0.308178 1.035292 0.063963 -0.410105 0.116893 1.018084 -0.213796 0.002336 0 1
135 -0.243379 0.039038 -0.185527 0.341086 0.733620 -1.255089 0.422881 0.253117 1.161221 0.252360 0.377116 -0.444191 0.208913 0 1
136 1.306831 0.220282 -0.386785 -1.153056 1.946157 1.302925 -1.248915 -0.883391 -0.265715 1.982123 1.319726 0.442799 1.500702 2 1
137 1.247354 0.605285 -0.077094 0.407727 1.841346 -0.789475 -1.501921 0.627200 1.160569 -0.001277 -0.516474 0.653115 1.248304 2 1
138 0.708935 -1.127949 0.861675 -1.147026 -0.241837 -1.657997 -0.127631 2.115046 -0.340085 1.759678 -0.965027 1.205391 0.879613 1 1
139 0.708839 -0.956820 -1.350334 -1.562796 0.707567 -0.389784 -2.197459 2.741627 1.474984 -0.351995 1.728271 1.249063 0.769635 1 1
140 1.483186 0.682555 -0.612419 0.474508 0.910933 1.248228 0.176444 0.490360 1.208370 -0.434267 -0.260485 -0.319060 0.067114 2 1
141 0.201126 0.561986 0.031425 0.091477 0.565265 1.039870 0.116405 -0.484695 -1.094180 -1.320674 0.773891 0.702095 0.386850 2 1
142 0.003924 -1.301791 -0.501001 1.193249 -0.495635 0.002788 -1.372557 -0.559755 -0.221626 -0.659689 0.093759 -0.130319 0.035051 1 1
143 -0.055580 -0.659181 -1.244063 -0.214578 0.114523 -0.633686 -0.516044 0.580491 1.035424 0.468185 0.471840 1.199764 0.150708 1 1

144 rows × 15 columns

In [159]:
# Count rows per (chosen, Cluster) pair, pivot to one row per cluster,
# and draw a stacked bar chart: one bar per cluster, split by chosen 0/1.
stacked = X.groupby(['chosen','Cluster']).size().reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(10,7))
Out[159]:
<matplotlib.axes._subplots.AxesSubplot at 0x1b8277fe668>
In [160]:
# Render the current company name as a Markdown H2 heading in the output.
from IPython.display import display, Markdown, Latex
display(Markdown('## '+companies[4]))

Specialized

ANN

In [97]:
X = df_n_ps_std_mfcc[4]
In [98]:
y = df_n_ps[4]['chosen']
In [99]:
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [100]:
X_train.shape
Out[100]:
(164, 13)
In [101]:
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [102]:
# Candidate hyperparameter values for the grid search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]  # defined but excluded from the grid below
In [103]:
import time
start = time.time() # Current time in seconds since the Unix epoch (1 Jan 1970) — used to time the search below

np.random.seed(1234)
# Search space; batch_size was deliberately left out to keep the search tractable.
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track both kappa and accuracy per fold; refit the best model on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): `iid` was deprecated in scikit-learn 0.22 and removed in
# 0.24 — this cell requires an older sklearn; confirm the pinned version.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [168]:
grid.fit(X_train, y_train)

# Report the best hyperparameters with their CV accuracy and the kappa of
# that same configuration. (Message text is in Spanish — runtime output,
# left untouched.)
print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # Time after the model-selection run finishes
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'tanh', 'hidden_layer_sizes': (30,), 'learning_rate_init': 0.008, 'max_iter': 300}, que permiten obtener un Accuracy de 68.29% y un Kappa del 35.96
Tiempo total: 24.73 minutos
In [104]:
# Reuse the best configuration found by the (slow) grid search above
# without re-running it; values copied from the printed result.
grid.best_params_={'activation': 'tanh', 'hidden_layer_sizes': (30,), 'learning_rate_init': 0.008, 'max_iter': 300}

# Input width = number of features.
n0=X_train.shape[1]

# Layer widths for the Keras rebuild: the tuned hidden sizes plus a
# single output unit (idiomatic list() replaces the index-append loop).
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)

lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [105]:
input_tensor = Input(shape = (n0,))
In [106]:
# Stack the tuned hidden layers (every size in ns except the final
# output unit), each one fed by the previous layer's output.
hidden_outputs = [input_tensor]
for width in ns[:-1]:
    hidden_outputs.append(Dense(width, activation = grid.best_params_['activation'])(hidden_outputs[-1]))

# Single sigmoid unit for binary classification.
classification_output = Dense(ns[-1], activation = 'sigmoid')(hidden_outputs[-1])
In [107]:
# Build the model and snapshot its freshly initialised weights so
# training (further down) can restart from this exact initialisation.
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [108]:
model.summary()
Model: "model_5"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_6 (InputLayer)         (None, 13)                0         
_________________________________________________________________
dense_15 (Dense)             (None, 30)                420       
_________________________________________________________________
dense_16 (Dense)             (None, 1)                 31        
=================================================================
Total params: 451
Trainable params: 451
Non-trainable params: 0
_________________________________________________________________
In [109]:
# Restore the saved initial weights, then train with Adam at the tuned
# learning rate; ReduceLROnPlateau halves the LR when validation
# accuracy fails to improve by 0.01 for 10 consecutive epochs.
model.set_weights(weights)
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test),
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 164 samples, validate on 55 samples
Epoch 1/300
164/164 [==============================] - 0s 1ms/step - loss: 0.7239 - accuracy: 0.4939 - val_loss: 0.7181 - val_accuracy: 0.4727
Epoch 2/300
164/164 [==============================] - 0s 73us/step - loss: 0.6690 - accuracy: 0.6159 - val_loss: 0.6907 - val_accuracy: 0.5091
Epoch 3/300
164/164 [==============================] - 0s 73us/step - loss: 0.6550 - accuracy: 0.6341 - val_loss: 0.6686 - val_accuracy: 0.5636
Epoch 4/300
164/164 [==============================] - 0s 73us/step - loss: 0.6522 - accuracy: 0.6646 - val_loss: 0.6564 - val_accuracy: 0.5818
Epoch 5/300
164/164 [==============================] - 0s 73us/step - loss: 0.6453 - accuracy: 0.6951 - val_loss: 0.6496 - val_accuracy: 0.5636
Epoch 6/300
164/164 [==============================] - 0s 61us/step - loss: 0.6334 - accuracy: 0.6829 - val_loss: 0.6514 - val_accuracy: 0.5818
Epoch 7/300
164/164 [==============================] - 0s 73us/step - loss: 0.6312 - accuracy: 0.6951 - val_loss: 0.6566 - val_accuracy: 0.5818
Epoch 8/300
164/164 [==============================] - 0s 73us/step - loss: 0.6219 - accuracy: 0.6951 - val_loss: 0.6593 - val_accuracy: 0.5455
Epoch 9/300
164/164 [==============================] - 0s 73us/step - loss: 0.6101 - accuracy: 0.6951 - val_loss: 0.6743 - val_accuracy: 0.5818
Epoch 10/300
164/164 [==============================] - 0s 79us/step - loss: 0.6074 - accuracy: 0.6646 - val_loss: 0.6942 - val_accuracy: 0.5455
Epoch 11/300
164/164 [==============================] - 0s 67us/step - loss: 0.5983 - accuracy: 0.6768 - val_loss: 0.6874 - val_accuracy: 0.5273
Epoch 12/300
164/164 [==============================] - 0s 73us/step - loss: 0.5875 - accuracy: 0.7195 - val_loss: 0.6730 - val_accuracy: 0.5455
Epoch 13/300
164/164 [==============================] - 0s 73us/step - loss: 0.5814 - accuracy: 0.7073 - val_loss: 0.6623 - val_accuracy: 0.5636
Epoch 14/300
164/164 [==============================] - 0s 79us/step - loss: 0.5710 - accuracy: 0.7134 - val_loss: 0.6570 - val_accuracy: 0.5636

Epoch 00014: ReduceLROnPlateau reducing learning rate to 0.004000000189989805.
Epoch 15/300
164/164 [==============================] - 0s 73us/step - loss: 0.5651 - accuracy: 0.7012 - val_loss: 0.6549 - val_accuracy: 0.5818
Epoch 16/300
164/164 [==============================] - ETA: 0s - loss: 0.5508 - accuracy: 0.78 - 0s 67us/step - loss: 0.5600 - accuracy: 0.7378 - val_loss: 0.6584 - val_accuracy: 0.6000
Epoch 17/300
164/164 [==============================] - 0s 67us/step - loss: 0.5555 - accuracy: 0.7317 - val_loss: 0.6597 - val_accuracy: 0.6182
Epoch 18/300
164/164 [==============================] - 0s 67us/step - loss: 0.5514 - accuracy: 0.7622 - val_loss: 0.6634 - val_accuracy: 0.6000
Epoch 19/300
164/164 [==============================] - 0s 79us/step - loss: 0.5490 - accuracy: 0.7744 - val_loss: 0.6709 - val_accuracy: 0.5273
Epoch 20/300
164/164 [==============================] - 0s 73us/step - loss: 0.5431 - accuracy: 0.7683 - val_loss: 0.6761 - val_accuracy: 0.5273
Epoch 21/300
164/164 [==============================] - 0s 79us/step - loss: 0.5414 - accuracy: 0.7622 - val_loss: 0.6769 - val_accuracy: 0.5273
Epoch 22/300
164/164 [==============================] - 0s 73us/step - loss: 0.5387 - accuracy: 0.7561 - val_loss: 0.6754 - val_accuracy: 0.5273
Epoch 23/300
164/164 [==============================] - 0s 79us/step - loss: 0.5317 - accuracy: 0.7622 - val_loss: 0.6715 - val_accuracy: 0.5455
Epoch 24/300
164/164 [==============================] - 0s 67us/step - loss: 0.5278 - accuracy: 0.7561 - val_loss: 0.6652 - val_accuracy: 0.5636
Epoch 25/300
164/164 [==============================] - 0s 67us/step - loss: 0.5216 - accuracy: 0.7805 - val_loss: 0.6646 - val_accuracy: 0.6000
Epoch 26/300
164/164 [==============================] - 0s 73us/step - loss: 0.5176 - accuracy: 0.7988 - val_loss: 0.6656 - val_accuracy: 0.6182
Epoch 27/300
164/164 [==============================] - 0s 79us/step - loss: 0.5137 - accuracy: 0.8049 - val_loss: 0.6732 - val_accuracy: 0.6182

Epoch 00027: ReduceLROnPlateau reducing learning rate to 0.0020000000949949026.
Epoch 28/300
164/164 [==============================] - 0s 79us/step - loss: 0.5110 - accuracy: 0.8110 - val_loss: 0.6759 - val_accuracy: 0.5818
Epoch 29/300
164/164 [==============================] - 0s 79us/step - loss: 0.5085 - accuracy: 0.8110 - val_loss: 0.6713 - val_accuracy: 0.5636
Epoch 30/300
164/164 [==============================] - 0s 79us/step - loss: 0.5073 - accuracy: 0.7988 - val_loss: 0.6656 - val_accuracy: 0.5818
Epoch 31/300
164/164 [==============================] - 0s 67us/step - loss: 0.5058 - accuracy: 0.7988 - val_loss: 0.6638 - val_accuracy: 0.5818
Epoch 32/300
164/164 [==============================] - 0s 61us/step - loss: 0.5030 - accuracy: 0.8171 - val_loss: 0.6598 - val_accuracy: 0.5818
Epoch 33/300
164/164 [==============================] - 0s 73us/step - loss: 0.5001 - accuracy: 0.8171 - val_loss: 0.6587 - val_accuracy: 0.6000
Epoch 34/300
164/164 [==============================] - 0s 85us/step - loss: 0.4975 - accuracy: 0.8171 - val_loss: 0.6580 - val_accuracy: 0.6000
Epoch 35/300
164/164 [==============================] - 0s 85us/step - loss: 0.4947 - accuracy: 0.8110 - val_loss: 0.6555 - val_accuracy: 0.6182
Epoch 36/300
164/164 [==============================] - 0s 85us/step - loss: 0.4924 - accuracy: 0.8232 - val_loss: 0.6522 - val_accuracy: 0.6182
Epoch 37/300
164/164 [==============================] - 0s 79us/step - loss: 0.4896 - accuracy: 0.8232 - val_loss: 0.6506 - val_accuracy: 0.6364
Epoch 38/300
164/164 [==============================] - 0s 73us/step - loss: 0.4873 - accuracy: 0.8232 - val_loss: 0.6510 - val_accuracy: 0.6364
Epoch 39/300
164/164 [==============================] - 0s 67us/step - loss: 0.4845 - accuracy: 0.8171 - val_loss: 0.6480 - val_accuracy: 0.6364
Epoch 40/300
164/164 [==============================] - 0s 73us/step - loss: 0.4807 - accuracy: 0.8232 - val_loss: 0.6458 - val_accuracy: 0.6364
Epoch 41/300
164/164 [==============================] - 0s 73us/step - loss: 0.4782 - accuracy: 0.8354 - val_loss: 0.6433 - val_accuracy: 0.6364
Epoch 42/300
164/164 [==============================] - 0s 79us/step - loss: 0.4758 - accuracy: 0.8354 - val_loss: 0.6435 - val_accuracy: 0.6364
Epoch 43/300
164/164 [==============================] - 0s 104us/step - loss: 0.4742 - accuracy: 0.8354 - val_loss: 0.6458 - val_accuracy: 0.6364
Epoch 44/300
164/164 [==============================] - 0s 91us/step - loss: 0.4713 - accuracy: 0.8415 - val_loss: 0.6430 - val_accuracy: 0.6364
Epoch 45/300
164/164 [==============================] - 0s 79us/step - loss: 0.4686 - accuracy: 0.8476 - val_loss: 0.6421 - val_accuracy: 0.6182
Epoch 46/300
164/164 [==============================] - 0s 79us/step - loss: 0.4657 - accuracy: 0.8476 - val_loss: 0.6387 - val_accuracy: 0.6182
Epoch 47/300
164/164 [==============================] - 0s 67us/step - loss: 0.4637 - accuracy: 0.8537 - val_loss: 0.6390 - val_accuracy: 0.6182

Epoch 00047: ReduceLROnPlateau reducing learning rate to 0.0010000000474974513.
Epoch 48/300
164/164 [==============================] - 0s 61us/step - loss: 0.4612 - accuracy: 0.8537 - val_loss: 0.6392 - val_accuracy: 0.6182
Epoch 49/300
164/164 [==============================] - 0s 67us/step - loss: 0.4601 - accuracy: 0.8537 - val_loss: 0.6410 - val_accuracy: 0.6182
Epoch 50/300
164/164 [==============================] - 0s 79us/step - loss: 0.4588 - accuracy: 0.8476 - val_loss: 0.6427 - val_accuracy: 0.6182
Epoch 51/300
164/164 [==============================] - 0s 79us/step - loss: 0.4577 - accuracy: 0.8415 - val_loss: 0.6431 - val_accuracy: 0.6182
Epoch 52/300
164/164 [==============================] - 0s 73us/step - loss: 0.4562 - accuracy: 0.8537 - val_loss: 0.6440 - val_accuracy: 0.6182
Epoch 53/300
164/164 [==============================] - 0s 79us/step - loss: 0.4550 - accuracy: 0.8537 - val_loss: 0.6443 - val_accuracy: 0.6182
Epoch 54/300
164/164 [==============================] - 0s 67us/step - loss: 0.4539 - accuracy: 0.8537 - val_loss: 0.6446 - val_accuracy: 0.6182
Epoch 55/300
164/164 [==============================] - 0s 67us/step - loss: 0.4526 - accuracy: 0.8537 - val_loss: 0.6444 - val_accuracy: 0.6182
Epoch 56/300
164/164 [==============================] - 0s 79us/step - loss: 0.4512 - accuracy: 0.8537 - val_loss: 0.6449 - val_accuracy: 0.6182
Epoch 57/300
164/164 [==============================] - 0s 79us/step - loss: 0.4500 - accuracy: 0.8537 - val_loss: 0.6450 - val_accuracy: 0.6182

Epoch 00057: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.
Epoch 58/300
164/164 [==============================] - 0s 73us/step - loss: 0.4492 - accuracy: 0.8659 - val_loss: 0.6445 - val_accuracy: 0.6182
Epoch 59/300
164/164 [==============================] - 0s 73us/step - loss: 0.4485 - accuracy: 0.8659 - val_loss: 0.6441 - val_accuracy: 0.6182
Epoch 60/300
164/164 [==============================] - 0s 73us/step - loss: 0.4481 - accuracy: 0.8659 - val_loss: 0.6449 - val_accuracy: 0.6182
Epoch 61/300
164/164 [==============================] - 0s 79us/step - loss: 0.4471 - accuracy: 0.8659 - val_loss: 0.6459 - val_accuracy: 0.6182
Epoch 62/300
164/164 [==============================] - 0s 91us/step - loss: 0.4467 - accuracy: 0.8659 - val_loss: 0.6465 - val_accuracy: 0.6182
Epoch 63/300
164/164 [==============================] - 0s 85us/step - loss: 0.4459 - accuracy: 0.8659 - val_loss: 0.6467 - val_accuracy: 0.6182
Epoch 64/300
164/164 [==============================] - 0s 61us/step - loss: 0.4453 - accuracy: 0.8659 - val_loss: 0.6471 - val_accuracy: 0.6182
Epoch 65/300
164/164 [==============================] - 0s 67us/step - loss: 0.4448 - accuracy: 0.8659 - val_loss: 0.6470 - val_accuracy: 0.6182
Epoch 66/300
164/164 [==============================] - 0s 73us/step - loss: 0.4440 - accuracy: 0.8659 - val_loss: 0.6466 - val_accuracy: 0.6182
Epoch 67/300
164/164 [==============================] - 0s 73us/step - loss: 0.4439 - accuracy: 0.8659 - val_loss: 0.6464 - val_accuracy: 0.6182

Epoch 00067: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
Epoch 68/300
164/164 [==============================] - 0s 73us/step - loss: 0.4429 - accuracy: 0.8659 - val_loss: 0.6462 - val_accuracy: 0.6182
Epoch 69/300
164/164 [==============================] - 0s 67us/step - loss: 0.4427 - accuracy: 0.8659 - val_loss: 0.6457 - val_accuracy: 0.6182
Epoch 70/300
164/164 [==============================] - 0s 61us/step - loss: 0.4424 - accuracy: 0.8659 - val_loss: 0.6451 - val_accuracy: 0.6182
Epoch 71/300
164/164 [==============================] - 0s 73us/step - loss: 0.4420 - accuracy: 0.8659 - val_loss: 0.6447 - val_accuracy: 0.6182
Epoch 72/300
164/164 [==============================] - 0s 73us/step - loss: 0.4417 - accuracy: 0.8659 - val_loss: 0.6438 - val_accuracy: 0.6182
Epoch 73/300
164/164 [==============================] - 0s 79us/step - loss: 0.4414 - accuracy: 0.8659 - val_loss: 0.6431 - val_accuracy: 0.6182
Epoch 74/300
164/164 [==============================] - 0s 79us/step - loss: 0.4411 - accuracy: 0.8720 - val_loss: 0.6425 - val_accuracy: 0.6182
Epoch 75/300
164/164 [==============================] - 0s 67us/step - loss: 0.4407 - accuracy: 0.8720 - val_loss: 0.6418 - val_accuracy: 0.6182
Epoch 76/300
164/164 [==============================] - 0s 67us/step - loss: 0.4405 - accuracy: 0.8720 - val_loss: 0.6414 - val_accuracy: 0.6182
Epoch 77/300
164/164 [==============================] - 0s 73us/step - loss: 0.4402 - accuracy: 0.8720 - val_loss: 0.6411 - val_accuracy: 0.6182

Epoch 00077: ReduceLROnPlateau reducing learning rate to 0.0001250000059371814.
Epoch 78/300
164/164 [==============================] - 0s 128us/step - loss: 0.4399 - accuracy: 0.8720 - val_loss: 0.6412 - val_accuracy: 0.6182
Epoch 79/300
164/164 [==============================] - 0s 110us/step - loss: 0.4397 - accuracy: 0.8720 - val_loss: 0.6411 - val_accuracy: 0.6182
Epoch 80/300
164/164 [==============================] - 0s 110us/step - loss: 0.4396 - accuracy: 0.8720 - val_loss: 0.6411 - val_accuracy: 0.6182
Epoch 81/300
164/164 [==============================] - 0s 183us/step - loss: 0.4394 - accuracy: 0.8720 - val_loss: 0.6412 - val_accuracy: 0.6182
Epoch 82/300
164/164 [==============================] - 0s 158us/step - loss: 0.4393 - accuracy: 0.8720 - val_loss: 0.6414 - val_accuracy: 0.6182
Epoch 83/300
164/164 [==============================] - 0s 110us/step - loss: 0.4391 - accuracy: 0.8720 - val_loss: 0.6416 - val_accuracy: 0.6182
Epoch 84/300
164/164 [==============================] - 0s 104us/step - loss: 0.4390 - accuracy: 0.8720 - val_loss: 0.6419 - val_accuracy: 0.6182
Epoch 85/300
164/164 [==============================] - 0s 116us/step - loss: 0.4389 - accuracy: 0.8720 - val_loss: 0.6422 - val_accuracy: 0.6182
Epoch 86/300
164/164 [==============================] - 0s 98us/step - loss: 0.4387 - accuracy: 0.8720 - val_loss: 0.6421 - val_accuracy: 0.6182
Epoch 87/300
164/164 [==============================] - 0s 104us/step - loss: 0.4385 - accuracy: 0.8720 - val_loss: 0.6420 - val_accuracy: 0.6182

Epoch 00087: ReduceLROnPlateau reducing learning rate to 6.25000029685907e-05.
Epoch 88/300
164/164 [==============================] - 0s 98us/step - loss: 0.4384 - accuracy: 0.8720 - val_loss: 0.6419 - val_accuracy: 0.6182
Epoch 89/300
164/164 [==============================] - 0s 98us/step - loss: 0.4383 - accuracy: 0.8720 - val_loss: 0.6419 - val_accuracy: 0.6182
Epoch 90/300
164/164 [==============================] - 0s 122us/step - loss: 0.4382 - accuracy: 0.8720 - val_loss: 0.6419 - val_accuracy: 0.6182
Epoch 91/300
164/164 [==============================] - 0s 116us/step - loss: 0.4381 - accuracy: 0.8720 - val_loss: 0.6418 - val_accuracy: 0.6182
Epoch 92/300
164/164 [==============================] - 0s 116us/step - loss: 0.4380 - accuracy: 0.8720 - val_loss: 0.6417 - val_accuracy: 0.6182
Epoch 93/300
164/164 [==============================] - 0s 98us/step - loss: 0.4379 - accuracy: 0.8720 - val_loss: 0.6417 - val_accuracy: 0.6182
Epoch 94/300
164/164 [==============================] - 0s 104us/step - loss: 0.4379 - accuracy: 0.8720 - val_loss: 0.6417 - val_accuracy: 0.6182
Epoch 95/300
164/164 [==============================] - 0s 98us/step - loss: 0.4378 - accuracy: 0.8720 - val_loss: 0.6416 - val_accuracy: 0.6182
Epoch 96/300
164/164 [==============================] - 0s 116us/step - loss: 0.4377 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 97/300
164/164 [==============================] - 0s 98us/step - loss: 0.4376 - accuracy: 0.8720 - val_loss: 0.6416 - val_accuracy: 0.6182

Epoch 00097: ReduceLROnPlateau reducing learning rate to 3.125000148429535e-05.
Epoch 98/300
164/164 [==============================] - ETA: 0s - loss: 0.4724 - accuracy: 0.81 - 0s 98us/step - loss: 0.4375 - accuracy: 0.8720 - val_loss: 0.6416 - val_accuracy: 0.6182
Epoch 99/300
164/164 [==============================] - 0s 104us/step - loss: 0.4375 - accuracy: 0.8720 - val_loss: 0.6416 - val_accuracy: 0.6182
Epoch 100/300
164/164 [==============================] - 0s 104us/step - loss: 0.4375 - accuracy: 0.8720 - val_loss: 0.6416 - val_accuracy: 0.6182
Epoch 101/300
164/164 [==============================] - 0s 98us/step - loss: 0.4374 - accuracy: 0.8720 - val_loss: 0.6417 - val_accuracy: 0.6182
Epoch 102/300
164/164 [==============================] - 0s 104us/step - loss: 0.4374 - accuracy: 0.8720 - val_loss: 0.6417 - val_accuracy: 0.6182
Epoch 103/300
164/164 [==============================] - 0s 98us/step - loss: 0.4373 - accuracy: 0.8720 - val_loss: 0.6417 - val_accuracy: 0.6182
Epoch 104/300
164/164 [==============================] - 0s 98us/step - loss: 0.4373 - accuracy: 0.8720 - val_loss: 0.6417 - val_accuracy: 0.6182
Epoch 105/300
164/164 [==============================] - 0s 98us/step - loss: 0.4373 - accuracy: 0.8720 - val_loss: 0.6417 - val_accuracy: 0.6182
Epoch 106/300
164/164 [==============================] - 0s 110us/step - loss: 0.4372 - accuracy: 0.8720 - val_loss: 0.6417 - val_accuracy: 0.6182
Epoch 107/300
164/164 [==============================] - 0s 104us/step - loss: 0.4372 - accuracy: 0.8720 - val_loss: 0.6417 - val_accuracy: 0.6182

Epoch 00107: ReduceLROnPlateau reducing learning rate to 1.5625000742147677e-05.
Epoch 108/300
164/164 [==============================] - 0s 97us/step - loss: 0.4371 - accuracy: 0.8720 - val_loss: 0.6417 - val_accuracy: 0.6182
Epoch 109/300
164/164 [==============================] - 0s 104us/step - loss: 0.4371 - accuracy: 0.8720 - val_loss: 0.6417 - val_accuracy: 0.6182
Epoch 110/300
164/164 [==============================] - 0s 104us/step - loss: 0.4371 - accuracy: 0.8720 - val_loss: 0.6417 - val_accuracy: 0.6182
Epoch 111/300
164/164 [==============================] - 0s 104us/step - loss: 0.4371 - accuracy: 0.8720 - val_loss: 0.6416 - val_accuracy: 0.6182
Epoch 112/300
164/164 [==============================] - 0s 104us/step - loss: 0.4371 - accuracy: 0.8720 - val_loss: 0.6416 - val_accuracy: 0.6182
Epoch 113/300
164/164 [==============================] - 0s 110us/step - loss: 0.4370 - accuracy: 0.8720 - val_loss: 0.6416 - val_accuracy: 0.6182
Epoch 114/300
164/164 [==============================] - 0s 110us/step - loss: 0.4370 - accuracy: 0.8720 - val_loss: 0.6416 - val_accuracy: 0.6182
Epoch 115/300
164/164 [==============================] - 0s 128us/step - loss: 0.4370 - accuracy: 0.8720 - val_loss: 0.6416 - val_accuracy: 0.6182
Epoch 116/300
164/164 [==============================] - 0s 122us/step - loss: 0.4370 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 117/300
164/164 [==============================] - 0s 122us/step - loss: 0.4370 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182

Epoch 00117: ReduceLROnPlateau reducing learning rate to 7.812500371073838e-06.
Epoch 118/300
164/164 [==============================] - 0s 104us/step - loss: 0.4370 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 119/300
164/164 [==============================] - 0s 104us/step - loss: 0.4369 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 120/300
164/164 [==============================] - 0s 97us/step - loss: 0.4369 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 121/300
164/164 [==============================] - 0s 97us/step - loss: 0.4369 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 122/300
164/164 [==============================] - 0s 91us/step - loss: 0.4369 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 123/300
164/164 [==============================] - 0s 104us/step - loss: 0.4369 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 124/300
164/164 [==============================] - 0s 122us/step - loss: 0.4369 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 125/300
164/164 [==============================] - 0s 98us/step - loss: 0.4369 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 126/300
164/164 [==============================] - 0s 98us/step - loss: 0.4369 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 127/300
164/164 [==============================] - 0s 98us/step - loss: 0.4369 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182

Epoch 00127: ReduceLROnPlateau reducing learning rate to 3.906250185536919e-06.
Epoch 128/300
164/164 [==============================] - 0s 104us/step - loss: 0.4369 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 129/300
164/164 [==============================] - 0s 91us/step - loss: 0.4369 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 130/300
164/164 [==============================] - 0s 91us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 131/300
164/164 [==============================] - 0s 104us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 132/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 133/300
164/164 [==============================] - 0s 225us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 134/300
164/164 [==============================] - 0s 122us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 135/300
164/164 [==============================] - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 136/300
164/164 [==============================] - 0s 104us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 137/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182

Epoch 00137: ReduceLROnPlateau reducing learning rate to 1.9531250927684596e-06.
Epoch 138/300
164/164 [==============================] - 0s 104us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 139/300
164/164 [==============================] - 0s 97us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 140/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 141/300
164/164 [==============================] - 0s 97us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 142/300
164/164 [==============================] - 0s 91us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 143/300
164/164 [==============================] - 0s 104us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 144/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 145/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 146/300
164/164 [==============================] - 0s 91us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 147/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182

Epoch 00147: ReduceLROnPlateau reducing learning rate to 9.765625463842298e-07.
Epoch 148/300
164/164 [==============================] - 0s 104us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 149/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 150/300
164/164 [==============================] - 0s 91us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 151/300
164/164 [==============================] - 0s 91us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 152/300
164/164 [==============================] - 0s 134us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 153/300
164/164 [==============================] - 0s 122us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 154/300
164/164 [==============================] - 0s 134us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 155/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 156/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 157/300
164/164 [==============================] - 0s 128us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182

Epoch 00157: ReduceLROnPlateau reducing learning rate to 4.882812731921149e-07.
Epoch 158/300
164/164 [==============================] - 0s 128us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 159/300
164/164 [==============================] - 0s 128us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 160/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 161/300
164/164 [==============================] - 0s 128us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 162/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 163/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 164/300
164/164 [==============================] - 0s 134us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 165/300
164/164 [==============================] - 0s 122us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 166/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 167/300
164/164 [==============================] - 0s 122us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182

Epoch 00167: ReduceLROnPlateau reducing learning rate to 2.4414063659605745e-07.
Epoch 168/300
164/164 [==============================] - 0s 122us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 169/300
164/164 [==============================] - 0s 122us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 170/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 171/300
164/164 [==============================] - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 172/300
164/164 [==============================] - 0s 122us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 173/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 174/300
164/164 [==============================] - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 175/300
164/164 [==============================] - 0s 104us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 176/300
164/164 [==============================] - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 177/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182

Epoch 00177: ReduceLROnPlateau reducing learning rate to 1.2207031829802872e-07.
Epoch 178/300
164/164 [==============================] - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 179/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 180/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 181/300
164/164 [==============================] - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 182/300
164/164 [==============================] - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 183/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 184/300
164/164 [==============================] - 0s 128us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 185/300
164/164 [==============================] - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 186/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 187/300
164/164 [==============================] - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182

Epoch 00187: ReduceLROnPlateau reducing learning rate to 6.103515914901436e-08.
Epoch 188/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 189/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 190/300
164/164 [==============================] - 0s 122us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 191/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 192/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 193/300
164/164 [==============================] - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 194/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 195/300
164/164 [==============================] - 0s 122us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 196/300
164/164 [==============================] - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 197/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182

Epoch 00197: ReduceLROnPlateau reducing learning rate to 3.051757957450718e-08.
Epoch 198/300
164/164 [==============================] - 0s 122us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 199/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 200/300
164/164 [==============================] - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 201/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 202/300
164/164 [==============================] - 0s 122us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 203/300
164/164 [==============================] - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 204/300
164/164 [==============================] - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 205/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 206/300
164/164 [==============================] - 0s 244us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 207/300
164/164 [==============================] - 0s 122us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182

Epoch 00207: ReduceLROnPlateau reducing learning rate to 1.525878978725359e-08.
Epoch 208/300
164/164 [==============================] - 0s 128us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 209/300
164/164 [==============================] - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 210/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 211/300
164/164 [==============================] - 0s 122us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 212/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 213/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 214/300
164/164 [==============================] - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 215/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 216/300
164/164 [==============================] - 0s 122us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 217/300
164/164 [==============================] - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182

Epoch 00217: ReduceLROnPlateau reducing learning rate to 7.629394893626795e-09.
Epoch 218/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 219/300
164/164 [==============================] - 0s 122us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 220/300
164/164 [==============================] - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 221/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 222/300
164/164 [==============================] - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 223/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 224/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 225/300
164/164 [==============================] - ETA: 0s - loss: 0.4141 - accuracy: 0.90 - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 226/300
164/164 [==============================] - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 227/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182

Epoch 00227: ReduceLROnPlateau reducing learning rate to 3.814697446813398e-09.
Epoch 228/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 229/300
164/164 [==============================] - 0s 104us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 230/300
164/164 [==============================] - 0s 104us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 231/300
164/164 [==============================] - 0s 91us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 232/300
164/164 [==============================] - 0s 91us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 233/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 234/300
164/164 [==============================] - 0s 91us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 235/300
164/164 [==============================] - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 236/300
164/164 [==============================] - 0s 262us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 237/300
164/164 [==============================] - 0s 122us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182

Epoch 00237: ReduceLROnPlateau reducing learning rate to 1.907348723406699e-09.
Epoch 238/300
164/164 [==============================] - 0s 158us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 239/300
164/164 [==============================] - 0s 177us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 240/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 241/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 242/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 243/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 244/300
164/164 [==============================] - 0s 104us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 245/300
164/164 [==============================] - 0s 104us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 246/300
164/164 [==============================] - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 247/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182

Epoch 00247: ReduceLROnPlateau reducing learning rate to 9.536743617033494e-10.
Epoch 248/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 249/300
164/164 [==============================] - 0s 104us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 250/300
164/164 [==============================] - 0s 128us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 251/300
164/164 [==============================] - 0s 97us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 252/300
164/164 [==============================] - 0s 97us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 253/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 254/300
164/164 [==============================] - 0s 91us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 255/300
164/164 [==============================] - 0s 85us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 256/300
164/164 [==============================] - 0s 91us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 257/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182

Epoch 00257: ReduceLROnPlateau reducing learning rate to 4.768371808516747e-10.
Epoch 258/300
164/164 [==============================] - 0s 104us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 259/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 260/300
164/164 [==============================] - 0s 91us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 261/300
164/164 [==============================] - 0s 104us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 262/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 263/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 264/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 265/300
164/164 [==============================] - 0s 97us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 266/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 267/300
164/164 [==============================] - 0s 134us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182

Epoch 00267: ReduceLROnPlateau reducing learning rate to 2.3841859042583735e-10.
Epoch 268/300
164/164 [==============================] - 0s 104us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 269/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 270/300
164/164 [==============================] - 0s 104us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 271/300
164/164 [==============================] - 0s 116us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 272/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 273/300
164/164 [==============================] - 0s 104us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 274/300
164/164 [==============================] - 0s 104us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 275/300
164/164 [==============================] - 0s 104us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 276/300
164/164 [==============================] - 0s 104us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 277/300
164/164 [==============================] - 0s 110us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182

Epoch 00277: ReduceLROnPlateau reducing learning rate to 1.1920929521291868e-10.
Epoch 278/300
164/164 [==============================] - 0s 97us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 279/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 280/300
164/164 [==============================] - 0s 104us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 281/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 282/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 283/300
164/164 [==============================] - 0s 91us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 284/300
164/164 [==============================] - 0s 91us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 285/300
164/164 [==============================] - 0s 91us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 286/300
164/164 [==============================] - 0s 91us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 287/300
164/164 [==============================] - 0s 97us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182

Epoch 00287: ReduceLROnPlateau reducing learning rate to 5.960464760645934e-11.
Epoch 288/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 289/300
164/164 [==============================] - 0s 91us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 290/300
164/164 [==============================] - 0s 91us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 291/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 292/300
164/164 [==============================] - 0s 91us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 293/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 294/300
164/164 [==============================] - 0s 98us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 295/300
164/164 [==============================] - 0s 91us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 296/300
164/164 [==============================] - 0s 104us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 297/300
164/164 [==============================] - 0s 104us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182

Epoch 00297: ReduceLROnPlateau reducing learning rate to 2.980232380322967e-11.
Epoch 298/300
164/164 [==============================] - 0s 91us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 299/300
164/164 [==============================] - 0s 91us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
Epoch 300/300
164/164 [==============================] - 0s 91us/step - loss: 0.4368 - accuracy: 0.8720 - val_loss: 0.6415 - val_accuracy: 0.6182
In [110]:
# Pull the per-epoch metrics recorded by model.fit (see the training log above).
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# One x-value per training epoch.
epochs = range(len(acc))

# Accuracy curves: dots = training, solid line = validation.
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

# Loss curves for the same run.
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
In [111]:
# Evaluate the trained Keras model on the hold-out test split.
evaluation = model.evaluate(X_test, y_test)
test_loss, test_acc = evaluation
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
55/55 [==============================] - 0s 73us/step
test loss: 0.6415141766721552, test accuracy: 0.6181818246841431
In [112]:
# ROC AUC is computed on the raw predicted scores (before thresholding).
y_pred = model.predict(X_test)
auc = roc_auc_score(y_test, y_pred)
print("AUC ROC: ", auc)
AUC ROC:  0.6970899470899471
In [113]:
# Threshold the scores at 0.5 to get hard 0/1 class labels.
y_pred = [int(score >= 0.5) for score in y_pred]
print("Kappa: ", cohen_kappa_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
Kappa:  0.2325581395348838
[[21  7]
 [14 13]]

KMeans clustering of the MFCC features

In [179]:
X
Out[179]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13
0 0.992062 -0.477172 -1.079451 -2.369470 -1.705431 -0.098594 -0.281836 -1.432001 -0.898623 0.130446 -0.024683 -0.312128 0.020392
1 0.843575 -0.507672 -0.731713 -0.334904 1.442336 -0.491141 -0.266416 -0.511246 1.004414 0.558777 0.127114 -1.667555 0.835458
2 0.816922 -0.263544 0.639646 -0.865417 1.276602 -0.245238 0.106722 -0.761365 -0.170481 -1.443667 -0.451102 1.196430 -0.037846
3 4.368525 0.851784 -0.671158 -0.128467 2.141169 -0.472725 -1.437233 -1.858760 1.581800 -0.145852 0.107228 1.458238 1.666081
4 0.001312 0.535305 -0.648296 0.221414 0.549478 0.736878 -0.439538 -0.138787 0.584258 0.095671 1.901833 2.909252 1.802578
5 -0.236754 0.488978 0.203743 0.088401 -0.151814 0.811707 -0.092973 0.153518 -0.936863 0.354100 0.123352 1.318569 1.097711
6 -0.842496 0.742173 0.068601 1.394492 -0.276167 1.301853 0.336343 1.077540 -1.118983 1.688235 -0.103661 1.224883 0.350956
7 -0.952702 1.078642 -0.563379 -0.018149 -0.073042 -0.591301 -1.392389 0.209234 0.725065 0.064350 0.034449 0.581953 2.151966
8 0.046457 -0.093025 -0.804385 0.542662 -0.130939 0.042792 1.198959 -0.559116 0.017192 -0.249308 0.747851 -0.035599 0.995166
9 -0.781158 0.099463 0.196737 2.462131 0.316140 -0.369698 2.196715 -0.800443 2.137687 1.438443 0.055279 -0.284437 1.702942
10 -0.906167 0.568017 0.700382 2.876646 -0.809125 -0.491839 1.801564 -2.406947 1.939246 1.397556 0.709408 -0.423394 1.773713
11 1.172687 1.292213 -0.402038 0.087342 0.324539 0.973336 -0.548282 0.781195 0.846038 0.464514 -1.030463 -0.559243 0.168727
12 0.367875 1.949889 0.516382 0.657124 -0.534306 0.575187 -0.750861 0.247200 -0.232297 0.332174 -0.426787 0.318763 0.083316
13 1.270520 1.194102 0.267933 0.676186 0.394734 -0.709975 -0.047626 1.113385 0.339962 0.424937 -0.528480 0.671225 0.078062
14 -0.095931 0.792392 0.626113 0.189989 0.315198 -0.175744 0.011713 -0.072196 0.742338 0.974567 0.935685 0.083454 0.970157
15 -0.322645 0.977766 0.685697 0.670670 0.997903 0.619018 0.498110 -0.016728 0.445370 -0.102204 0.199517 -0.315303 0.347920
16 0.565974 0.440551 0.402995 1.815814 1.906139 1.105013 1.256180 0.907086 0.592851 -0.159427 1.013051 -0.620202 1.259932
17 -0.863540 0.887127 1.387720 -0.082168 -0.694633 -0.810037 1.251697 -0.443532 0.307506 0.253798 -0.292483 0.030812 0.176350
18 -0.822258 -0.630193 -0.672294 -0.279417 -0.731983 -1.510167 -1.393705 -0.161872 0.722297 0.910604 -0.610303 0.380547 1.296315
19 -0.889164 0.641922 2.278761 0.190213 -0.341231 -0.624107 1.228820 -0.549441 -0.662942 0.481866 -0.541347 -1.061735 -0.122227
20 0.795964 0.484784 0.898919 0.027625 0.415359 0.271286 0.366966 -0.498975 0.300352 0.216702 0.361195 -0.771976 0.085971
21 0.168183 -0.077353 1.019887 -0.637065 0.731534 0.877245 1.225125 -0.566997 -0.452222 -1.105384 0.185636 -0.782808 -0.224975
22 0.510023 -0.099060 0.064384 -0.039933 0.786951 0.119530 -0.259052 -0.881354 -0.113425 1.191274 0.335443 -0.189618 -0.337688
23 0.216210 -0.069447 0.974822 -0.626273 0.835854 0.914236 1.226463 -0.369525 -0.398299 -1.146613 0.026274 -0.944475 -0.192948
24 -0.239273 -0.518568 -0.127834 0.045011 0.403223 0.368253 -0.584902 -0.905436 -0.405699 0.129383 0.809611 -0.174138 -0.115393
25 -1.241907 1.355534 -0.693470 0.793789 0.606007 0.930263 0.009323 -0.712463 0.037916 -0.182143 1.212760 -0.083882 0.639662
26 -0.847436 1.180146 -0.489592 1.189572 -0.457645 -0.163979 -0.010812 -0.765561 -0.347488 -0.216575 0.804302 -0.236378 0.481212
27 -0.378383 1.017722 -1.812001 0.443514 0.583209 1.709730 0.715521 -0.076610 0.416120 0.013436 0.420025 -0.925263 0.626400
28 0.245370 1.187084 1.056929 2.013063 -0.505622 1.228583 -1.158143 0.622932 0.113512 0.948397 0.008252 1.035839 -0.691702
29 -0.623386 1.368898 1.216933 1.961377 0.744541 1.555516 -1.205283 -0.252995 -0.325624 0.538668 0.197646 0.356450 -0.219812
... ... ... ... ... ... ... ... ... ... ... ... ... ...
189 -0.565077 0.809784 0.557457 0.815038 0.823053 -0.931359 -0.039244 -0.199068 0.083690 -0.235063 -0.030800 -0.564557 -0.253507
190 -0.602848 0.638838 0.763481 -0.424641 -0.810302 -0.951734 -0.732024 -0.504038 0.379372 0.748895 -0.593820 -0.772491 0.175752
191 -1.094031 -0.896961 0.400325 -1.635971 -1.099938 -1.091799 -0.593281 0.890889 0.984647 0.584509 0.318496 0.175062 -0.783524
192 -0.348357 0.944340 0.239675 0.003612 -1.370450 -0.996597 -0.616405 0.161481 -0.258760 0.534721 -0.431338 0.376456 -1.623026
193 2.110671 -1.005236 0.268022 0.459390 -1.985350 0.405677 -0.361571 -1.272053 -0.873345 2.111218 -0.246708 0.798456 1.067252
194 1.222194 -1.600122 -1.149302 0.230839 -0.213026 -1.572114 0.486447 -0.770701 0.244895 2.689114 -2.296486 0.718338 -1.220356
195 -0.509789 -0.757711 0.189267 0.516644 0.750906 -1.485714 2.485824 -1.204754 -3.373113 -0.450016 -1.091178 -0.474728 -0.522197
196 0.194175 -0.618441 -1.090420 0.233017 -1.492602 -0.342192 -1.612833 0.714990 0.072755 -0.026932 0.464029 0.212333 1.204262
197 0.297635 -0.727616 -1.927078 -0.145347 -0.990256 0.052935 -1.791108 -0.351333 -0.064903 0.201842 1.581215 1.084453 -0.168841
198 -0.271030 -0.575137 -1.005334 -0.238705 -0.931830 -1.319114 -0.668613 0.510822 0.209623 0.487577 0.154874 0.133768 1.259548
199 0.059096 -0.370313 -0.760047 0.706270 -2.488266 -1.336692 -0.683584 0.436366 -0.150281 -0.711308 -0.851205 0.253942 -0.052516
200 0.147539 -0.233608 -0.578016 0.870637 -2.418094 -1.286070 -0.692623 0.342693 0.015890 -0.795418 -1.221248 0.309493 -0.526480
201 -0.076214 -1.055629 0.159389 -0.403318 -0.111273 -1.325990 -0.867502 0.519381 0.192007 -0.024629 0.220420 0.551046 0.399728
202 1.468986 0.518464 1.475456 -1.400891 0.408186 -1.831201 1.474742 0.566660 -0.403197 -1.295176 -0.443787 -1.884346 -1.993491
203 -1.739107 0.192104 -0.670709 -1.236237 -1.672915 -0.680127 0.027148 0.524909 1.865754 -0.634310 -0.607429 -1.471191 -0.632982
204 -0.663868 -0.862566 -0.329803 -0.857680 0.167824 -0.013328 0.176565 0.125832 0.609671 -1.296827 -0.435986 -1.341223 -0.977207
205 -0.739818 -0.668220 -0.077479 0.026286 0.027801 0.040659 -0.161646 -1.046948 -1.248976 -0.449243 1.046834 1.381194 1.646325
206 0.475752 0.695473 -0.072097 1.081397 -0.366985 -2.008080 0.515734 0.005330 1.193800 -0.841825 -2.650200 -3.862624 -2.115507
207 -1.331365 -1.632552 -0.876636 0.076190 1.187799 1.138590 1.235955 1.583447 0.890342 -1.587964 0.546109 1.565567 1.756993
208 -0.397476 0.090963 1.217996 0.773741 1.107204 -1.125870 -0.915396 -1.130561 -1.914456 -0.664474 -0.226576 0.112420 0.235011
209 -0.465823 -1.372705 -0.445436 0.316510 -1.492946 -1.103783 0.353513 -0.311377 -1.095388 -0.615078 -0.585868 0.172807 -0.860564
210 -0.594535 -1.761364 -1.069906 -0.502969 -1.411276 -0.906350 -0.559102 -1.240920 -2.254196 -1.206339 -0.528047 0.924112 0.472298
211 -1.022693 0.373374 -0.104205 -0.815628 -0.574733 0.906934 0.765114 -0.015386 0.110695 1.832325 0.712557 -0.951976 -0.678869
212 -0.967902 0.155275 0.013938 -0.549105 -0.907792 0.881907 0.609589 -0.135010 -0.373473 1.152134 0.386511 -0.744687 -0.447017
213 -1.238242 -0.062983 -0.133082 -0.158458 -0.338086 -0.411874 0.964537 0.870379 0.530337 0.858339 0.489332 -1.190977 -1.340484
214 0.349761 -1.391267 -3.069473 0.840195 1.044391 -1.052018 1.004856 1.478511 1.210060 -1.145325 2.653757 1.937234 0.592139
215 0.782819 -1.300386 -0.487318 0.850960 -2.046427 1.050631 0.289069 2.400271 2.707288 -0.278238 0.152360 1.912210 -0.208225
216 1.847553 -1.059174 -0.808403 0.400706 -0.275009 0.409744 -0.141885 0.706348 0.476002 0.990111 -0.168504 0.856440 -0.395652
217 2.608478 0.174234 2.534211 -0.985597 -0.436400 3.751943 1.560179 -2.367095 1.272529 2.464209 -0.954336 0.310720 -1.209456
218 -0.069569 0.418008 -0.004324 1.330358 0.365352 -0.582788 -0.527444 -0.298114 -0.353021 -1.118883 -0.459230 -0.986241 -0.041010

219 rows × 13 columns

In [180]:
# Elbow method: within-cluster sum of squares (inertia) for K = 1..14.
WSSs = [KMeans(n_clusters=k, random_state=0).fit(X).inertia_
        for k in range(1, 15)]
WSSs
Out[180]:
[2847.0,
 2572.5760570812117,
 2370.209947155015,
 2235.6129406180157,
 2112.951551625758,
 2041.1809211260454,
 1982.3615393500422,
 1899.0667595696164,
 1851.9267246215204,
 1760.4468946465518,
 1745.79714786859,
 1689.1350809615656,
 1657.4940102564742,
 1625.370413913055]
In [181]:
# Elbow plot: look for the K where the WSS curve stops dropping sharply.
plt.figure(figsize=(12, 12))
plt.plot(range(1, 15), WSSs)
plt.title('K-means elbow curve (MFCC features)')
plt.xlabel('Number of clusters K')
plt.ylabel('Within-cluster sum of squares')
Out[181]:
[<matplotlib.lines.Line2D at 0x1b827c5de48>]

K = 3 — chosen at the elbow of the WSS curve above

In [182]:
# Fit K-means with K = 3 (chosen from the elbow plot); n_init=10 restarts.
kmeans_mfcc = KMeans(n_init=10, n_clusters=3, random_state=0)
kmeans_mfcc.fit(X)
Out[182]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=3, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [183]:
kmeans_mfcc.labels_
Out[183]:
array([1, 2, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 2,
       1, 2, 1, 0, 0, 0, 0, 0, 1, 0, 2, 0, 1, 1, 1, 2, 2, 2, 1, 1, 1, 0,
       0, 0, 1, 1, 2, 0, 2, 2, 0, 0, 0, 1, 1, 0, 1, 1, 1, 2, 0, 0, 1, 1,
       1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 2,
       0, 1, 2, 2, 2, 2, 1, 1, 1, 2, 2, 0, 1, 0, 2, 2, 2, 2, 1, 2, 2, 2,
       2, 0, 1, 2, 0, 0, 2, 2, 2, 2, 1, 1, 0, 0, 0, 2, 2, 2, 2, 2, 0, 0,
       0, 0, 1, 0, 2, 1, 1, 1, 2, 0, 1, 0, 0, 1, 1, 1, 2, 0, 1, 2, 2, 2,
       1, 1, 2, 2, 1, 2, 1, 1, 1, 1, 1, 0, 1, 0, 0, 2, 0, 2, 2, 2, 0, 2,
       2, 1, 2, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 1, 2, 1, 1, 1, 2, 1, 1,
       1, 1, 1, 1, 2, 2, 2, 1, 2, 0, 1, 1, 1, 2, 2, 2, 0, 0, 1, 0, 1])
In [184]:
# Re-derive the assignments via predict on the same X used for fitting.
# NOTE(review): this should match kmeans_mfcc.labels_ above — confirm;
# if so, clusters_mfcc = kmeans_mfcc.labels_ would avoid the extra pass.
clusters_mfcc = kmeans_mfcc.predict(X)
clusters_mfcc
Out[184]:
array([1, 2, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 2,
       1, 2, 1, 0, 0, 0, 0, 0, 1, 0, 2, 0, 1, 1, 1, 2, 2, 2, 1, 1, 1, 0,
       0, 0, 1, 1, 2, 0, 2, 2, 0, 0, 0, 1, 1, 0, 1, 1, 1, 2, 0, 0, 1, 1,
       1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 2,
       0, 1, 2, 2, 2, 2, 1, 1, 1, 2, 2, 0, 1, 0, 2, 2, 2, 2, 1, 2, 2, 2,
       2, 0, 1, 2, 0, 0, 2, 2, 2, 2, 1, 1, 0, 0, 0, 2, 2, 2, 2, 2, 0, 0,
       0, 0, 1, 0, 2, 1, 1, 1, 2, 0, 1, 0, 0, 1, 1, 1, 2, 0, 1, 2, 2, 2,
       1, 1, 2, 2, 1, 2, 1, 1, 1, 1, 1, 0, 1, 0, 0, 2, 0, 2, 2, 2, 0, 2,
       2, 1, 2, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 1, 2, 1, 1, 1, 2, 1, 1,
       1, 1, 1, 1, 2, 2, 2, 1, 2, 0, 1, 1, 1, 2, 2, 2, 0, 0, 1, 0, 1])
In [185]:
# Attach the cluster id and the target to the feature frame for comparison.
# Use .assign (returns a new frame) instead of the original in-place .loc
# writes: X may alias the source standardized-feature frame, and mutating it
# in place would silently add these columns there too (and can raise
# SettingWithCopyWarning on a sliced frame).
X = X.assign(Cluster=clusters_mfcc, chosen=list(y))
In [186]:
X
Out[186]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13 Cluster chosen
0 0.992062 -0.477172 -1.079451 -2.369470 -1.705431 -0.098594 -0.281836 -1.432001 -0.898623 0.130446 -0.024683 -0.312128 0.020392 1 0
1 0.843575 -0.507672 -0.731713 -0.334904 1.442336 -0.491141 -0.266416 -0.511246 1.004414 0.558777 0.127114 -1.667555 0.835458 2 0
2 0.816922 -0.263544 0.639646 -0.865417 1.276602 -0.245238 0.106722 -0.761365 -0.170481 -1.443667 -0.451102 1.196430 -0.037846 1 0
3 4.368525 0.851784 -0.671158 -0.128467 2.141169 -0.472725 -1.437233 -1.858760 1.581800 -0.145852 0.107228 1.458238 1.666081 1 0
4 0.001312 0.535305 -0.648296 0.221414 0.549478 0.736878 -0.439538 -0.138787 0.584258 0.095671 1.901833 2.909252 1.802578 0 0
5 -0.236754 0.488978 0.203743 0.088401 -0.151814 0.811707 -0.092973 0.153518 -0.936863 0.354100 0.123352 1.318569 1.097711 0 0
6 -0.842496 0.742173 0.068601 1.394492 -0.276167 1.301853 0.336343 1.077540 -1.118983 1.688235 -0.103661 1.224883 0.350956 0 0
7 -0.952702 1.078642 -0.563379 -0.018149 -0.073042 -0.591301 -1.392389 0.209234 0.725065 0.064350 0.034449 0.581953 2.151966 1 0
8 0.046457 -0.093025 -0.804385 0.542662 -0.130939 0.042792 1.198959 -0.559116 0.017192 -0.249308 0.747851 -0.035599 0.995166 0 0
9 -0.781158 0.099463 0.196737 2.462131 0.316140 -0.369698 2.196715 -0.800443 2.137687 1.438443 0.055279 -0.284437 1.702942 0 0
10 -0.906167 0.568017 0.700382 2.876646 -0.809125 -0.491839 1.801564 -2.406947 1.939246 1.397556 0.709408 -0.423394 1.773713 0 0
11 1.172687 1.292213 -0.402038 0.087342 0.324539 0.973336 -0.548282 0.781195 0.846038 0.464514 -1.030463 -0.559243 0.168727 0 0
12 0.367875 1.949889 0.516382 0.657124 -0.534306 0.575187 -0.750861 0.247200 -0.232297 0.332174 -0.426787 0.318763 0.083316 0 0
13 1.270520 1.194102 0.267933 0.676186 0.394734 -0.709975 -0.047626 1.113385 0.339962 0.424937 -0.528480 0.671225 0.078062 0 0
14 -0.095931 0.792392 0.626113 0.189989 0.315198 -0.175744 0.011713 -0.072196 0.742338 0.974567 0.935685 0.083454 0.970157 0 0
15 -0.322645 0.977766 0.685697 0.670670 0.997903 0.619018 0.498110 -0.016728 0.445370 -0.102204 0.199517 -0.315303 0.347920 0 0
16 0.565974 0.440551 0.402995 1.815814 1.906139 1.105013 1.256180 0.907086 0.592851 -0.159427 1.013051 -0.620202 1.259932 0 0
17 -0.863540 0.887127 1.387720 -0.082168 -0.694633 -0.810037 1.251697 -0.443532 0.307506 0.253798 -0.292483 0.030812 0.176350 0 0
18 -0.822258 -0.630193 -0.672294 -0.279417 -0.731983 -1.510167 -1.393705 -0.161872 0.722297 0.910604 -0.610303 0.380547 1.296315 1 0
19 -0.889164 0.641922 2.278761 0.190213 -0.341231 -0.624107 1.228820 -0.549441 -0.662942 0.481866 -0.541347 -1.061735 -0.122227 2 0
20 0.795964 0.484784 0.898919 0.027625 0.415359 0.271286 0.366966 -0.498975 0.300352 0.216702 0.361195 -0.771976 0.085971 0 0
21 0.168183 -0.077353 1.019887 -0.637065 0.731534 0.877245 1.225125 -0.566997 -0.452222 -1.105384 0.185636 -0.782808 -0.224975 2 0
22 0.510023 -0.099060 0.064384 -0.039933 0.786951 0.119530 -0.259052 -0.881354 -0.113425 1.191274 0.335443 -0.189618 -0.337688 1 0
23 0.216210 -0.069447 0.974822 -0.626273 0.835854 0.914236 1.226463 -0.369525 -0.398299 -1.146613 0.026274 -0.944475 -0.192948 2 0
24 -0.239273 -0.518568 -0.127834 0.045011 0.403223 0.368253 -0.584902 -0.905436 -0.405699 0.129383 0.809611 -0.174138 -0.115393 1 0
25 -1.241907 1.355534 -0.693470 0.793789 0.606007 0.930263 0.009323 -0.712463 0.037916 -0.182143 1.212760 -0.083882 0.639662 0 0
26 -0.847436 1.180146 -0.489592 1.189572 -0.457645 -0.163979 -0.010812 -0.765561 -0.347488 -0.216575 0.804302 -0.236378 0.481212 0 0
27 -0.378383 1.017722 -1.812001 0.443514 0.583209 1.709730 0.715521 -0.076610 0.416120 0.013436 0.420025 -0.925263 0.626400 0 0
28 0.245370 1.187084 1.056929 2.013063 -0.505622 1.228583 -1.158143 0.622932 0.113512 0.948397 0.008252 1.035839 -0.691702 0 0
29 -0.623386 1.368898 1.216933 1.961377 0.744541 1.555516 -1.205283 -0.252995 -0.325624 0.538668 0.197646 0.356450 -0.219812 0 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
189 -0.565077 0.809784 0.557457 0.815038 0.823053 -0.931359 -0.039244 -0.199068 0.083690 -0.235063 -0.030800 -0.564557 -0.253507 0 1
190 -0.602848 0.638838 0.763481 -0.424641 -0.810302 -0.951734 -0.732024 -0.504038 0.379372 0.748895 -0.593820 -0.772491 0.175752 1 1
191 -1.094031 -0.896961 0.400325 -1.635971 -1.099938 -1.091799 -0.593281 0.890889 0.984647 0.584509 0.318496 0.175062 -0.783524 2 1
192 -0.348357 0.944340 0.239675 0.003612 -1.370450 -0.996597 -0.616405 0.161481 -0.258760 0.534721 -0.431338 0.376456 -1.623026 1 1
193 2.110671 -1.005236 0.268022 0.459390 -1.985350 0.405677 -0.361571 -1.272053 -0.873345 2.111218 -0.246708 0.798456 1.067252 1 1
194 1.222194 -1.600122 -1.149302 0.230839 -0.213026 -1.572114 0.486447 -0.770701 0.244895 2.689114 -2.296486 0.718338 -1.220356 1 1
195 -0.509789 -0.757711 0.189267 0.516644 0.750906 -1.485714 2.485824 -1.204754 -3.373113 -0.450016 -1.091178 -0.474728 -0.522197 2 1
196 0.194175 -0.618441 -1.090420 0.233017 -1.492602 -0.342192 -1.612833 0.714990 0.072755 -0.026932 0.464029 0.212333 1.204262 1 1
197 0.297635 -0.727616 -1.927078 -0.145347 -0.990256 0.052935 -1.791108 -0.351333 -0.064903 0.201842 1.581215 1.084453 -0.168841 1 1
198 -0.271030 -0.575137 -1.005334 -0.238705 -0.931830 -1.319114 -0.668613 0.510822 0.209623 0.487577 0.154874 0.133768 1.259548 1 1
199 0.059096 -0.370313 -0.760047 0.706270 -2.488266 -1.336692 -0.683584 0.436366 -0.150281 -0.711308 -0.851205 0.253942 -0.052516 1 1
200 0.147539 -0.233608 -0.578016 0.870637 -2.418094 -1.286070 -0.692623 0.342693 0.015890 -0.795418 -1.221248 0.309493 -0.526480 1 1
201 -0.076214 -1.055629 0.159389 -0.403318 -0.111273 -1.325990 -0.867502 0.519381 0.192007 -0.024629 0.220420 0.551046 0.399728 1 1
202 1.468986 0.518464 1.475456 -1.400891 0.408186 -1.831201 1.474742 0.566660 -0.403197 -1.295176 -0.443787 -1.884346 -1.993491 2 1
203 -1.739107 0.192104 -0.670709 -1.236237 -1.672915 -0.680127 0.027148 0.524909 1.865754 -0.634310 -0.607429 -1.471191 -0.632982 2 1
204 -0.663868 -0.862566 -0.329803 -0.857680 0.167824 -0.013328 0.176565 0.125832 0.609671 -1.296827 -0.435986 -1.341223 -0.977207 2 1
205 -0.739818 -0.668220 -0.077479 0.026286 0.027801 0.040659 -0.161646 -1.046948 -1.248976 -0.449243 1.046834 1.381194 1.646325 1 1
206 0.475752 0.695473 -0.072097 1.081397 -0.366985 -2.008080 0.515734 0.005330 1.193800 -0.841825 -2.650200 -3.862624 -2.115507 2 1
207 -1.331365 -1.632552 -0.876636 0.076190 1.187799 1.138590 1.235955 1.583447 0.890342 -1.587964 0.546109 1.565567 1.756993 0 1
208 -0.397476 0.090963 1.217996 0.773741 1.107204 -1.125870 -0.915396 -1.130561 -1.914456 -0.664474 -0.226576 0.112420 0.235011 1 1
209 -0.465823 -1.372705 -0.445436 0.316510 -1.492946 -1.103783 0.353513 -0.311377 -1.095388 -0.615078 -0.585868 0.172807 -0.860564 1 1
210 -0.594535 -1.761364 -1.069906 -0.502969 -1.411276 -0.906350 -0.559102 -1.240920 -2.254196 -1.206339 -0.528047 0.924112 0.472298 1 1
211 -1.022693 0.373374 -0.104205 -0.815628 -0.574733 0.906934 0.765114 -0.015386 0.110695 1.832325 0.712557 -0.951976 -0.678869 2 1
212 -0.967902 0.155275 0.013938 -0.549105 -0.907792 0.881907 0.609589 -0.135010 -0.373473 1.152134 0.386511 -0.744687 -0.447017 2 1
213 -1.238242 -0.062983 -0.133082 -0.158458 -0.338086 -0.411874 0.964537 0.870379 0.530337 0.858339 0.489332 -1.190977 -1.340484 2 1
214 0.349761 -1.391267 -3.069473 0.840195 1.044391 -1.052018 1.004856 1.478511 1.210060 -1.145325 2.653757 1.937234 0.592139 0 1
215 0.782819 -1.300386 -0.487318 0.850960 -2.046427 1.050631 0.289069 2.400271 2.707288 -0.278238 0.152360 1.912210 -0.208225 0 1
216 1.847553 -1.059174 -0.808403 0.400706 -0.275009 0.409744 -0.141885 0.706348 0.476002 0.990111 -0.168504 0.856440 -0.395652 1 1
217 2.608478 0.174234 2.534211 -0.985597 -0.436400 3.751943 1.560179 -2.367095 1.272529 2.464209 -0.954336 0.310720 -1.209456 0 1
218 -0.069569 0.418008 -0.004324 1.330358 0.365352 -0.582788 -0.527444 -0.298114 -0.353021 -1.118883 -0.459230 -0.986241 -0.041010 1 1

219 rows × 15 columns

In [187]:
# Cross-tabulate chosen (0/1) against cluster id and show stacked bars.
pair_counts = X.groupby(['chosen', 'Cluster']).size()
stacked = pair_counts.reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df[[0, 1]].plot.bar(stacked=True, figsize=(10, 7))
Out[187]:
<matplotlib.axes._subplots.AxesSubplot at 0x1b827cab550>
In [188]:
# Render the name of company index 5 as a markdown section heading.
from IPython.display import display, Markdown, Latex
display(Markdown('## '+companies[5]))

Urban Place

ANN

In [114]:
X = df_n_ps_std_mfcc[5]
In [115]:
y = df_n_ps[5]['chosen']
In [116]:
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [117]:
X_train.shape
Out[117]:
(162, 13)
In [118]:
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [119]:
# Candidate values for the MLP hyperparameter search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
# Architectures: 1-3 hidden layers of 10-30 units each.
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
# Defined but excluded from the search below (see the commented-out entry).
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [120]:
import time
start = time.time() # Current time in seconds since Jan 1, 1970 (epoch reference); used below to report total search time

np.random.seed(1234)  # seed the global RNG so the search is reproducible

# Hyperparameter grid: 3 activations x 11 iteration counts x 11 architectures
# x 11 learning rates.  batch_size is left out (commented) to keep the
# search tractable.
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Score each candidate on both Cohen's kappa and accuracy; refit on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): `iid` was deprecated in scikit-learn 0.22 and removed in
# 0.24 — this cell requires an older sklearn; confirm the pinned version.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [196]:
# Run the exhaustive grid search over all candidates x 5 CV folds
# (~33 minutes per the recorded output below).
grid.fit(X_train, y_train)

# Report the best parameter set, its CV accuracy, and the corresponding kappa.
# (Message text is Spanish: "The best model's parameters were {0}, giving an
# Accuracy of {1}% and a Kappa of {2}".)
print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # Time after the model finished training
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'logistic', 'hidden_layer_sizes': (30, 30), 'learning_rate_init': 0.009, 'max_iter': 1000}, que permiten obtener un Accuracy de 72.84% y un Kappa del 19.82
Tiempo total: 33.32 minutos
In [121]:
grid.best_params_= {'activation': 'sigmoid', 'hidden_layer_sizes': (30, 30), 'learning_rate_init': 0.009, 'max_iter': 1000}
n0=X_train.shape[1]
### hidden_layer_sizes
ns = []
for i in range (len(grid.best_params_['hidden_layer_sizes'])):
    ns.append(grid.best_params_['hidden_layer_sizes'][i])

ns.append(1)
lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [122]:
# Keras functional-API input layer, sized to the number of features (n0).
input_tensor = Input(shape = (n0,))
In [123]:
# Chain the hidden Dense layers, each consuming the previous layer's output.
# hidden_outputs keeps every intermediate tensor, starting from the input.
hidden_outputs = [input_tensor]
for width in ns[:-1]:
    layer = Dense(width, activation = grid.best_params_['activation'])
    hidden_outputs.append(layer(hidden_outputs[-1]))

# Final unit (ns[-1] == 1) with sigmoid for binary classification.
classification_output = Dense(ns[-1], activation = 'sigmoid')(hidden_outputs[-1])
In [124]:
# Assemble the functional model and snapshot its freshly-initialized weights
# so training can later be restarted from the same starting point.
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [125]:
# Architecture overview: 13 -> 30 -> 30 -> 1, 1,381 trainable parameters.
model.summary()
Model: "model_6"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_7 (InputLayer)         (None, 13)                0         
_________________________________________________________________
dense_17 (Dense)             (None, 30)                420       
_________________________________________________________________
dense_18 (Dense)             (None, 30)                930       
_________________________________________________________________
dense_19 (Dense)             (None, 1)                 31        
=================================================================
Total params: 1,381
Trainable params: 1,381
Non-trainable params: 0
_________________________________________________________________
In [126]:
# Restore the initial weight snapshot so training starts from scratch.
model.set_weights(weights)
# NOTE(review): `lr` is the legacy Adam kwarg; newer Keras uses `learning_rate`.
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Train with the tuned epoch budget; halve the learning rate whenever
# validation accuracy fails to improve by >= 0.01 for 10 epochs.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test),
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 162 samples, validate on 54 samples
Epoch 1/1000
162/162 [==============================] - 0s 1ms/step - loss: 0.6693 - accuracy: 0.6296 - val_loss: 0.5670 - val_accuracy: 0.7778
Epoch 2/1000
162/162 [==============================] - 0s 86us/step - loss: 0.6681 - accuracy: 0.6296 - val_loss: 0.5417 - val_accuracy: 0.7778
Epoch 3/1000
162/162 [==============================] - 0s 80us/step - loss: 0.6660 - accuracy: 0.6296 - val_loss: 0.6625 - val_accuracy: 0.7407
Epoch 4/1000
162/162 [==============================] - 0s 117us/step - loss: 0.6927 - accuracy: 0.4938 - val_loss: 0.6973 - val_accuracy: 0.3704
Epoch 5/1000
162/162 [==============================] - 0s 86us/step - loss: 0.6667 - accuracy: 0.6296 - val_loss: 0.5859 - val_accuracy: 0.7778
Epoch 6/1000
162/162 [==============================] - 0s 74us/step - loss: 0.6472 - accuracy: 0.6296 - val_loss: 0.5762 - val_accuracy: 0.7778
Epoch 7/1000
162/162 [==============================] - 0s 86us/step - loss: 0.6449 - accuracy: 0.6296 - val_loss: 0.5856 - val_accuracy: 0.7778
Epoch 8/1000
162/162 [==============================] - 0s 74us/step - loss: 0.6488 - accuracy: 0.6358 - val_loss: 0.6235 - val_accuracy: 0.7963
Epoch 9/1000
162/162 [==============================] - 0s 80us/step - loss: 0.6424 - accuracy: 0.6543 - val_loss: 0.5655 - val_accuracy: 0.7778
Epoch 10/1000
162/162 [==============================] - 0s 68us/step - loss: 0.6346 - accuracy: 0.6296 - val_loss: 0.5359 - val_accuracy: 0.7778
Epoch 11/1000
162/162 [==============================] - 0s 74us/step - loss: 0.6399 - accuracy: 0.6296 - val_loss: 0.5650 - val_accuracy: 0.8148
Epoch 12/1000
162/162 [==============================] - 0s 80us/step - loss: 0.6252 - accuracy: 0.6543 - val_loss: 0.5501 - val_accuracy: 0.7778
Epoch 13/1000
162/162 [==============================] - 0s 86us/step - loss: 0.6219 - accuracy: 0.6481 - val_loss: 0.5589 - val_accuracy: 0.8148
Epoch 14/1000
162/162 [==============================] - 0s 80us/step - loss: 0.6241 - accuracy: 0.6420 - val_loss: 0.5368 - val_accuracy: 0.7778
Epoch 15/1000
162/162 [==============================] - 0s 86us/step - loss: 0.6218 - accuracy: 0.6420 - val_loss: 0.5363 - val_accuracy: 0.7963
Epoch 16/1000
162/162 [==============================] - 0s 74us/step - loss: 0.6086 - accuracy: 0.6481 - val_loss: 0.5852 - val_accuracy: 0.7407
Epoch 17/1000
162/162 [==============================] - 0s 68us/step - loss: 0.6120 - accuracy: 0.6173 - val_loss: 0.5719 - val_accuracy: 0.7407
Epoch 18/1000
162/162 [==============================] - 0s 68us/step - loss: 0.6059 - accuracy: 0.6111 - val_loss: 0.5646 - val_accuracy: 0.7407
Epoch 19/1000
162/162 [==============================] - 0s 80us/step - loss: 0.6005 - accuracy: 0.6358 - val_loss: 0.5501 - val_accuracy: 0.7407
Epoch 20/1000
162/162 [==============================] - 0s 74us/step - loss: 0.5992 - accuracy: 0.6296 - val_loss: 0.5583 - val_accuracy: 0.7407
Epoch 21/1000
162/162 [==============================] - 0s 86us/step - loss: 0.5981 - accuracy: 0.6481 - val_loss: 0.6057 - val_accuracy: 0.6296

Epoch 00021: ReduceLROnPlateau reducing learning rate to 0.0044999998062849045.
Epoch 22/1000
162/162 [==============================] - 0s 86us/step - loss: 0.6104 - accuracy: 0.6728 - val_loss: 0.6085 - val_accuracy: 0.6111
Epoch 23/1000
162/162 [==============================] - 0s 86us/step - loss: 0.6104 - accuracy: 0.6728 - val_loss: 0.5901 - val_accuracy: 0.6852
Epoch 24/1000
162/162 [==============================] - 0s 80us/step - loss: 0.6000 - accuracy: 0.6605 - val_loss: 0.5539 - val_accuracy: 0.7407
Epoch 25/1000
162/162 [==============================] - 0s 74us/step - loss: 0.5960 - accuracy: 0.6235 - val_loss: 0.5391 - val_accuracy: 0.7407
Epoch 26/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5942 - accuracy: 0.6605 - val_loss: 0.5358 - val_accuracy: 0.7407
Epoch 27/1000
162/162 [==============================] - 0s 80us/step - loss: 0.5937 - accuracy: 0.6667 - val_loss: 0.5303 - val_accuracy: 0.7407
Epoch 28/1000
162/162 [==============================] - 0s 86us/step - loss: 0.5939 - accuracy: 0.6605 - val_loss: 0.5275 - val_accuracy: 0.7593
Epoch 29/1000
162/162 [==============================] - 0s 74us/step - loss: 0.5968 - accuracy: 0.6543 - val_loss: 0.5256 - val_accuracy: 0.7593
Epoch 30/1000
162/162 [==============================] - 0s 74us/step - loss: 0.5941 - accuracy: 0.6605 - val_loss: 0.5381 - val_accuracy: 0.7407
Epoch 31/1000
162/162 [==============================] - 0s 80us/step - loss: 0.5906 - accuracy: 0.6481 - val_loss: 0.5447 - val_accuracy: 0.7407

Epoch 00031: ReduceLROnPlateau reducing learning rate to 0.0022499999031424522.
Epoch 32/1000
162/162 [==============================] - 0s 86us/step - loss: 0.5887 - accuracy: 0.6296 - val_loss: 0.5487 - val_accuracy: 0.7222
Epoch 33/1000
162/162 [==============================] - 0s 80us/step - loss: 0.5872 - accuracy: 0.6420 - val_loss: 0.5544 - val_accuracy: 0.7222
Epoch 34/1000
162/162 [==============================] - 0s 80us/step - loss: 0.5863 - accuracy: 0.6543 - val_loss: 0.5616 - val_accuracy: 0.7037
Epoch 35/1000
162/162 [==============================] - 0s 80us/step - loss: 0.5880 - accuracy: 0.6728 - val_loss: 0.5678 - val_accuracy: 0.7037
Epoch 36/1000
162/162 [==============================] - 0s 80us/step - loss: 0.5871 - accuracy: 0.6605 - val_loss: 0.5671 - val_accuracy: 0.7037
Epoch 37/1000
162/162 [==============================] - 0s 80us/step - loss: 0.5873 - accuracy: 0.6667 - val_loss: 0.5772 - val_accuracy: 0.6667
Epoch 38/1000
162/162 [==============================] - 0s 62us/step - loss: 0.5911 - accuracy: 0.6667 - val_loss: 0.5921 - val_accuracy: 0.6481
Epoch 39/1000
162/162 [==============================] - 0s 68us/step - loss: 0.5906 - accuracy: 0.6728 - val_loss: 0.5819 - val_accuracy: 0.6481
Epoch 40/1000
162/162 [==============================] - 0s 86us/step - loss: 0.5884 - accuracy: 0.6605 - val_loss: 0.5711 - val_accuracy: 0.6852
Epoch 41/1000
162/162 [==============================] - 0s 80us/step - loss: 0.5847 - accuracy: 0.6667 - val_loss: 0.5655 - val_accuracy: 0.7037

Epoch 00041: ReduceLROnPlateau reducing learning rate to 0.0011249999515712261.
Epoch 42/1000
162/162 [==============================] - 0s 86us/step - loss: 0.5840 - accuracy: 0.6790 - val_loss: 0.5584 - val_accuracy: 0.7037
Epoch 43/1000
162/162 [==============================] - 0s 93us/step - loss: 0.5841 - accuracy: 0.6543 - val_loss: 0.5531 - val_accuracy: 0.7222
Epoch 44/1000
162/162 [==============================] - 0s 80us/step - loss: 0.5840 - accuracy: 0.6543 - val_loss: 0.5525 - val_accuracy: 0.7222
Epoch 45/1000
162/162 [==============================] - 0s 80us/step - loss: 0.5830 - accuracy: 0.6543 - val_loss: 0.5550 - val_accuracy: 0.7222
Epoch 46/1000
162/162 [==============================] - 0s 86us/step - loss: 0.5829 - accuracy: 0.6481 - val_loss: 0.5579 - val_accuracy: 0.7037
Epoch 47/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5836 - accuracy: 0.6667 - val_loss: 0.5630 - val_accuracy: 0.7037
Epoch 48/1000
162/162 [==============================] - 0s 93us/step - loss: 0.5835 - accuracy: 0.6728 - val_loss: 0.5657 - val_accuracy: 0.7037
Epoch 49/1000
162/162 [==============================] - 0s 80us/step - loss: 0.5822 - accuracy: 0.6728 - val_loss: 0.5614 - val_accuracy: 0.7037
Epoch 50/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5824 - accuracy: 0.6667 - val_loss: 0.5586 - val_accuracy: 0.7037
Epoch 51/1000
162/162 [==============================] - 0s 93us/step - loss: 0.5821 - accuracy: 0.6605 - val_loss: 0.5580 - val_accuracy: 0.7037

Epoch 00051: ReduceLROnPlateau reducing learning rate to 0.0005624999757856131.
Epoch 52/1000
162/162 [==============================] - 0s 80us/step - loss: 0.5818 - accuracy: 0.6605 - val_loss: 0.5587 - val_accuracy: 0.7037
Epoch 53/1000
162/162 [==============================] - 0s 80us/step - loss: 0.5816 - accuracy: 0.6605 - val_loss: 0.5568 - val_accuracy: 0.7037
Epoch 54/1000
162/162 [==============================] - 0s 80us/step - loss: 0.5816 - accuracy: 0.6605 - val_loss: 0.5565 - val_accuracy: 0.7037
Epoch 55/1000
162/162 [==============================] - 0s 68us/step - loss: 0.5815 - accuracy: 0.6605 - val_loss: 0.5575 - val_accuracy: 0.7037
Epoch 56/1000
162/162 [==============================] - 0s 68us/step - loss: 0.5813 - accuracy: 0.6605 - val_loss: 0.5588 - val_accuracy: 0.7037
Epoch 57/1000
162/162 [==============================] - 0s 74us/step - loss: 0.5809 - accuracy: 0.6605 - val_loss: 0.5629 - val_accuracy: 0.7037
Epoch 58/1000
162/162 [==============================] - 0s 86us/step - loss: 0.5814 - accuracy: 0.6728 - val_loss: 0.5669 - val_accuracy: 0.7037
Epoch 59/1000
162/162 [==============================] - 0s 86us/step - loss: 0.5819 - accuracy: 0.6728 - val_loss: 0.5681 - val_accuracy: 0.6852
Epoch 60/1000
162/162 [==============================] - 0s 86us/step - loss: 0.5819 - accuracy: 0.6728 - val_loss: 0.5651 - val_accuracy: 0.7037
Epoch 61/1000
162/162 [==============================] - 0s 86us/step - loss: 0.5817 - accuracy: 0.6728 - val_loss: 0.5666 - val_accuracy: 0.6852

Epoch 00061: ReduceLROnPlateau reducing learning rate to 0.00028124998789280653.
Epoch 62/1000
162/162 [==============================] - 0s 74us/step - loss: 0.5816 - accuracy: 0.6728 - val_loss: 0.5674 - val_accuracy: 0.6852
Epoch 63/1000
162/162 [==============================] - 0s 80us/step - loss: 0.5818 - accuracy: 0.6790 - val_loss: 0.5691 - val_accuracy: 0.6852
Epoch 64/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5820 - accuracy: 0.6790 - val_loss: 0.5677 - val_accuracy: 0.6852
Epoch 65/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5816 - accuracy: 0.6790 - val_loss: 0.5678 - val_accuracy: 0.6852
Epoch 66/1000
162/162 [==============================] - 0s 93us/step - loss: 0.5816 - accuracy: 0.6790 - val_loss: 0.5681 - val_accuracy: 0.6852
Epoch 67/1000
162/162 [==============================] - 0s 86us/step - loss: 0.5816 - accuracy: 0.6790 - val_loss: 0.5675 - val_accuracy: 0.6852
Epoch 68/1000
162/162 [==============================] - 0s 74us/step - loss: 0.5815 - accuracy: 0.6667 - val_loss: 0.5660 - val_accuracy: 0.6852
Epoch 69/1000
162/162 [==============================] - 0s 68us/step - loss: 0.5812 - accuracy: 0.6728 - val_loss: 0.5650 - val_accuracy: 0.7037
Epoch 70/1000
162/162 [==============================] - 0s 62us/step - loss: 0.5811 - accuracy: 0.6728 - val_loss: 0.5631 - val_accuracy: 0.7037
Epoch 71/1000
162/162 [==============================] - 0s 80us/step - loss: 0.5806 - accuracy: 0.6667 - val_loss: 0.5606 - val_accuracy: 0.7037

Epoch 00071: ReduceLROnPlateau reducing learning rate to 0.00014062499394640326.
Epoch 72/1000
162/162 [==============================] - 0s 74us/step - loss: 0.5806 - accuracy: 0.6605 - val_loss: 0.5601 - val_accuracy: 0.7037
Epoch 73/1000
162/162 [==============================] - 0s 93us/step - loss: 0.5806 - accuracy: 0.6605 - val_loss: 0.5599 - val_accuracy: 0.7037
Epoch 74/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5805 - accuracy: 0.6605 - val_loss: 0.5594 - val_accuracy: 0.7037
Epoch 75/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5805 - accuracy: 0.6605 - val_loss: 0.5597 - val_accuracy: 0.7037
Epoch 76/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5805 - accuracy: 0.6605 - val_loss: 0.5604 - val_accuracy: 0.7037
Epoch 77/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5805 - accuracy: 0.6605 - val_loss: 0.5607 - val_accuracy: 0.7037
Epoch 78/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5804 - accuracy: 0.6605 - val_loss: 0.5605 - val_accuracy: 0.7037
Epoch 79/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5804 - accuracy: 0.6605 - val_loss: 0.5601 - val_accuracy: 0.7037
Epoch 80/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5804 - accuracy: 0.6605 - val_loss: 0.5599 - val_accuracy: 0.7037
Epoch 81/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5804 - accuracy: 0.6605 - val_loss: 0.5593 - val_accuracy: 0.7037

Epoch 00081: ReduceLROnPlateau reducing learning rate to 7.031249697320163e-05.
Epoch 82/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5803 - accuracy: 0.6605 - val_loss: 0.5593 - val_accuracy: 0.7037
Epoch 83/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5803 - accuracy: 0.6605 - val_loss: 0.5593 - val_accuracy: 0.7037
Epoch 84/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5803 - accuracy: 0.6605 - val_loss: 0.5588 - val_accuracy: 0.7037
Epoch 85/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5802 - accuracy: 0.6605 - val_loss: 0.5584 - val_accuracy: 0.7037
Epoch 86/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5802 - accuracy: 0.6605 - val_loss: 0.5584 - val_accuracy: 0.7037
Epoch 87/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5802 - accuracy: 0.6605 - val_loss: 0.5582 - val_accuracy: 0.7037
Epoch 88/1000
162/162 [==============================] - 0s 142us/step - loss: 0.5802 - accuracy: 0.6605 - val_loss: 0.5582 - val_accuracy: 0.7037
Epoch 89/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5802 - accuracy: 0.6605 - val_loss: 0.5579 - val_accuracy: 0.7037
Epoch 90/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5802 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 91/1000
162/162 [==============================] - 0s 290us/step - loss: 0.5802 - accuracy: 0.6605 - val_loss: 0.5575 - val_accuracy: 0.7037

Epoch 00091: ReduceLROnPlateau reducing learning rate to 3.5156248486600816e-05.
Epoch 92/1000
162/162 [==============================] - 0s 271us/step - loss: 0.5802 - accuracy: 0.6605 - val_loss: 0.5576 - val_accuracy: 0.7037
Epoch 93/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5802 - accuracy: 0.6605 - val_loss: 0.5576 - val_accuracy: 0.7037
Epoch 94/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5576 - val_accuracy: 0.7037
Epoch 95/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5802 - accuracy: 0.6605 - val_loss: 0.5574 - val_accuracy: 0.7037
Epoch 96/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5574 - val_accuracy: 0.7037
Epoch 97/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5575 - val_accuracy: 0.7037
Epoch 98/1000
162/162 [==============================] - 0s 185us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5574 - val_accuracy: 0.7037
Epoch 99/1000
162/162 [==============================] - 0s 154us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5573 - val_accuracy: 0.7037
Epoch 100/1000
162/162 [==============================] - 0s 197us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5574 - val_accuracy: 0.7037
Epoch 101/1000
162/162 [==============================] - 0s 173us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5575 - val_accuracy: 0.7037

Epoch 00101: ReduceLROnPlateau reducing learning rate to 1.7578124243300408e-05.
Epoch 102/1000
162/162 [==============================] - 0s 179us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5575 - val_accuracy: 0.7037
Epoch 103/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5576 - val_accuracy: 0.7037
Epoch 104/1000
162/162 [==============================] - 0s 154us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 105/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 106/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 107/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5576 - val_accuracy: 0.7037
Epoch 108/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5575 - val_accuracy: 0.7037
Epoch 109/1000
162/162 [==============================] - 0s 154us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5576 - val_accuracy: 0.7037
Epoch 110/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5575 - val_accuracy: 0.7037
Epoch 111/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5576 - val_accuracy: 0.7037

Epoch 00111: ReduceLROnPlateau reducing learning rate to 8.789062121650204e-06.
Epoch 112/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5576 - val_accuracy: 0.7037
Epoch 113/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 114/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 115/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 116/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 117/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 118/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 119/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 120/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5578 - val_accuracy: 0.7037
Epoch 121/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5578 - val_accuracy: 0.7037

Epoch 00121: ReduceLROnPlateau reducing learning rate to 4.394531060825102e-06.
Epoch 122/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5578 - val_accuracy: 0.7037
Epoch 123/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5578 - val_accuracy: 0.7037
Epoch 124/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5578 - val_accuracy: 0.7037
Epoch 125/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5578 - val_accuracy: 0.7037
Epoch 126/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5578 - val_accuracy: 0.7037
Epoch 127/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5578 - val_accuracy: 0.7037
Epoch 128/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5578 - val_accuracy: 0.7037
Epoch 129/1000
162/162 [==============================] - 0s 154us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5578 - val_accuracy: 0.7037
Epoch 130/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5578 - val_accuracy: 0.7037
Epoch 131/1000
162/162 [==============================] - 0s 142us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00131: ReduceLROnPlateau reducing learning rate to 2.197265530412551e-06.
Epoch 132/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 133/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 134/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 135/1000
162/162 [==============================] - 0s 142us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 136/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 137/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 138/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 139/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 140/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 141/1000
162/162 [==============================] - 0s 142us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00141: ReduceLROnPlateau reducing learning rate to 1.0986327652062755e-06.
Epoch 142/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 143/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 144/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 145/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 146/1000
162/162 [==============================] - 0s 154us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 147/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 148/1000
162/162 [==============================] - 0s 148us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 149/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 150/1000
162/162 [==============================] - 0s 154us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 151/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00151: ReduceLROnPlateau reducing learning rate to 5.493163826031378e-07.
Epoch 152/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 153/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 154/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 155/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 156/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 157/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 158/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 159/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 160/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 161/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00161: ReduceLROnPlateau reducing learning rate to 2.746581913015689e-07.
Epoch 162/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 163/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 164/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 165/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 166/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5801 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 167/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 168/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 169/1000
162/162 [==============================] - 0s 167us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 170/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 171/1000
162/162 [==============================] - 0s 148us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00171: ReduceLROnPlateau reducing learning rate to 1.3732909565078444e-07.
Epoch 172/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 173/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 174/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 175/1000
162/162 [==============================] - 0s 148us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 176/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 177/1000
162/162 [==============================] - 0s 148us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 178/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 179/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 180/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 181/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00181: ReduceLROnPlateau reducing learning rate to 6.866454782539222e-08.
Epoch 182/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 183/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 184/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 185/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 186/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 187/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 188/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 189/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 190/1000
162/162 [==============================] - 0s 154us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 191/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00191: ReduceLROnPlateau reducing learning rate to 3.433227391269611e-08.
Epoch 192/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 193/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 194/1000
162/162 [==============================] - 0s 197us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 195/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 196/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 197/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 198/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 199/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 200/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 201/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00201: ReduceLROnPlateau reducing learning rate to 1.7166136956348055e-08.
Epoch 202/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 203/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 204/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 205/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 206/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 207/1000
162/162 [==============================] - 0s 142us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 208/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 209/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 210/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 211/1000
162/162 [==============================] - 0s 234us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00211: ReduceLROnPlateau reducing learning rate to 8.583068478174027e-09.
Epoch 212/1000
162/162 [==============================] - 0s 173us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 213/1000
162/162 [==============================] - 0s 142us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 214/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 215/1000
162/162 [==============================] - 0s 142us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 216/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 217/1000
162/162 [==============================] - 0s 142us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 218/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 219/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 220/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 221/1000
162/162 [==============================] - 0s 2ms/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00221: ReduceLROnPlateau reducing learning rate to 4.291534239087014e-09.
Epoch 222/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 223/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 224/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 225/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 226/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 227/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 228/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 229/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 230/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 231/1000
162/162 [==============================] - 0s 142us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00231: ReduceLROnPlateau reducing learning rate to 2.145767119543507e-09.
Epoch 232/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 233/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 234/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 235/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 236/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 237/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 238/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 239/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 240/1000
162/162 [==============================] - 0s 142us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 241/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00241: ReduceLROnPlateau reducing learning rate to 1.0728835597717534e-09.
Epoch 242/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 243/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 244/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 245/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 246/1000
162/162 [==============================] - 0s 154us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 247/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 248/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 249/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 250/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 251/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00251: ReduceLROnPlateau reducing learning rate to 5.364417798858767e-10.
Epoch 252/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 253/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 254/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 255/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 256/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 257/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 258/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 259/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 260/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 261/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00261: ReduceLROnPlateau reducing learning rate to 2.6822088994293836e-10.
Epoch 262/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 263/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 264/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 265/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 266/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 267/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 268/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 269/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 270/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 271/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00271: ReduceLROnPlateau reducing learning rate to 1.3411044497146918e-10.
Epoch 272/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 273/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 274/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 275/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 276/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 277/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 278/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 279/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 280/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 281/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00281: ReduceLROnPlateau reducing learning rate to 6.705522248573459e-11.
Epoch 282/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 283/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 284/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 285/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 286/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 287/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 288/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 289/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 290/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 291/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00291: ReduceLROnPlateau reducing learning rate to 3.3527611242867295e-11.
Epoch 292/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 293/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 294/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 295/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 296/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 297/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 298/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 299/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 300/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 301/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00301: ReduceLROnPlateau reducing learning rate to 1.6763805621433647e-11.
Epoch 302/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 303/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 304/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 305/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 306/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 307/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 308/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 309/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 310/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 311/1000
162/162 [==============================] - 0s 86us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00311: ReduceLROnPlateau reducing learning rate to 8.381902810716824e-12.
Epoch 312/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 313/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 314/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 315/1000
162/162 [==============================] - 0s 93us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 316/1000
162/162 [==============================] - 0s 93us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 317/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 318/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 319/1000
162/162 [==============================] - 0s 93us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 320/1000
162/162 [==============================] - 0s 93us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 321/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00321: ReduceLROnPlateau reducing learning rate to 4.190951405358412e-12.
Epoch 322/1000
162/162 [==============================] - 0s 80us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 323/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 324/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 325/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 326/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 327/1000
162/162 [==============================] - 0s 142us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 328/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 329/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 330/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 331/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00331: ReduceLROnPlateau reducing learning rate to 2.095475702679206e-12.
Epoch 332/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 333/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 334/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 335/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 336/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 337/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 338/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 339/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 340/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 341/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00341: ReduceLROnPlateau reducing learning rate to 1.047737851339603e-12.
Epoch 342/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 343/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 344/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 345/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 346/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 347/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 348/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 349/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 350/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 351/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00351: ReduceLROnPlateau reducing learning rate to 5.238689256698015e-13.
Epoch 352/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 353/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 354/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 355/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 356/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 357/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 358/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 359/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 360/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 361/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00361: ReduceLROnPlateau reducing learning rate to 2.6193446283490074e-13.
Epoch 362/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 363/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 364/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 365/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 366/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 367/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 368/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 369/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 370/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 371/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00371: ReduceLROnPlateau reducing learning rate to 1.3096723141745037e-13.
Epoch 372/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 373/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 374/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 375/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 376/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 377/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 378/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 379/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 380/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 381/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00381: ReduceLROnPlateau reducing learning rate to 6.548361570872518e-14.
Epoch 382/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 383/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 384/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 385/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 386/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 387/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 388/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 389/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 390/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 391/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00391: ReduceLROnPlateau reducing learning rate to 3.274180785436259e-14.
Epoch 392/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 393/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 394/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 395/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 396/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 397/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 398/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 399/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 400/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 401/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00401: ReduceLROnPlateau reducing learning rate to 1.6370903927181296e-14.
Epoch 402/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 403/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 404/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 405/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 406/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 407/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 408/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 409/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 410/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 411/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00411: ReduceLROnPlateau reducing learning rate to 8.185451963590648e-15.
Epoch 412/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 413/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 414/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 415/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 416/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 417/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 418/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 419/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 420/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 421/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00421: ReduceLROnPlateau reducing learning rate to 4.092725981795324e-15.
Epoch 422/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 423/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 424/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 425/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 426/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 427/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 428/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 429/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 430/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 431/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00431: ReduceLROnPlateau reducing learning rate to 2.046362990897662e-15.
Epoch 432/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 433/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 434/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 435/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 436/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 437/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 438/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 439/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 440/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 441/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00441: ReduceLROnPlateau reducing learning rate to 1.023181495448831e-15.
Epoch 442/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 443/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 444/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 445/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 446/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 447/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 448/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 449/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 450/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 451/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00451: ReduceLROnPlateau reducing learning rate to 5.115907477244155e-16.
Epoch 452/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 453/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 454/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 455/1000
162/162 [==============================] - ETA: 0s - loss: 0.6148 - accuracy: 0.62 - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 456/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 457/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 458/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 459/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 460/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 461/1000
162/162 [==============================] - 0s 142us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00461: ReduceLROnPlateau reducing learning rate to 2.5579537386220775e-16.
Epoch 462/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 463/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 464/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 465/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 466/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 467/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 468/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 469/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 470/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 471/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00471: ReduceLROnPlateau reducing learning rate to 1.2789768693110388e-16.
Epoch 472/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 473/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 474/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 475/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 476/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 477/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 478/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 479/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 480/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 481/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00481: ReduceLROnPlateau reducing learning rate to 6.394884346555194e-17.
Epoch 482/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 483/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 484/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 485/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 486/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 487/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 488/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 489/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 490/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 491/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00491: ReduceLROnPlateau reducing learning rate to 3.197442173277597e-17.
Epoch 492/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 493/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 494/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 495/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 496/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 497/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 498/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 499/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 500/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 501/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00501: ReduceLROnPlateau reducing learning rate to 1.5987210866387985e-17.
Epoch 502/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 503/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 504/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 505/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 506/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 507/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 508/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 509/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 510/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 511/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00511: ReduceLROnPlateau reducing learning rate to 7.993605433193992e-18.
Epoch 512/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 513/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 514/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 515/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 516/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 517/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 518/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 519/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 520/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 521/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00521: ReduceLROnPlateau reducing learning rate to 3.996802716596996e-18.
Epoch 522/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 523/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 524/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 525/1000
162/162 [==============================] - 0s 93us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 526/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 527/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 528/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 529/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 530/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 531/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00531: ReduceLROnPlateau reducing learning rate to 1.998401358298498e-18.
Epoch 532/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 533/1000
162/162 [==============================] - ETA: 0s - loss: 0.6818 - accuracy: 0.53 - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 534/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 535/1000
162/162 [==============================] - ETA: 0s - loss: 0.5717 - accuracy: 0.59 - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 536/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 537/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 538/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 539/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 540/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 541/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00541: ReduceLROnPlateau reducing learning rate to 9.99200679149249e-19.
Epoch 542/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 543/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 544/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 545/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 546/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 547/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 548/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 549/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 550/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 551/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00551: ReduceLROnPlateau reducing learning rate to 4.996003395746245e-19.
Epoch 552/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 553/1000
162/162 [==============================] - 0s 167us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 554/1000
162/162 [==============================] - ETA: 0s - loss: 0.6048 - accuracy: 0.56 - 0s 142us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 555/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 556/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 557/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 558/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 559/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 560/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 561/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00561: ReduceLROnPlateau reducing learning rate to 2.4980016978731226e-19.
Epoch 562/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 563/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 564/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 565/1000
162/162 [==============================] - 0s 142us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 566/1000
162/162 [==============================] - 0s 234us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 567/1000
162/162 [==============================] - 0s 142us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 568/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 569/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 570/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 571/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00571: ReduceLROnPlateau reducing learning rate to 1.2490008489365613e-19.
Epoch 572/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 573/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 574/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 575/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 576/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 577/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 578/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 579/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 580/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 581/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00581: ReduceLROnPlateau reducing learning rate to 6.245004244682806e-20.
Epoch 582/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 583/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 584/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 585/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 586/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 587/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 588/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 589/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 590/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 591/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00591: ReduceLROnPlateau reducing learning rate to 3.122502122341403e-20.
Epoch 592/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 593/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 594/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 595/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 596/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 597/1000
162/162 [==============================] - 0s 93us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 598/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 599/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 600/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 601/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00601: ReduceLROnPlateau reducing learning rate to 1.5612510611707016e-20.
Epoch 602/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 603/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 604/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 605/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 606/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 607/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 608/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 609/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 610/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 611/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00611: ReduceLROnPlateau reducing learning rate to 7.806255305853508e-21.
Epoch 612/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 613/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 614/1000
162/162 [==============================] - 0s 148us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 615/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 616/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 617/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 618/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 619/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 620/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 621/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00621: ReduceLROnPlateau reducing learning rate to 3.903127652926754e-21.
Epoch 622/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 623/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 624/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 625/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 626/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 627/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 628/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 629/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 630/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 631/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00631: ReduceLROnPlateau reducing learning rate to 1.951563826463377e-21.
Epoch 632/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 633/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 634/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 635/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 636/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 637/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 638/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 639/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 640/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 641/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00641: ReduceLROnPlateau reducing learning rate to 9.757819132316885e-22.
Epoch 642/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 643/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 644/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 645/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 646/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 647/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 648/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 649/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 650/1000
162/162 [==============================] - ETA: 0s - loss: 0.6500 - accuracy: 0.46 - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 651/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00651: ReduceLROnPlateau reducing learning rate to 4.878909566158443e-22.
Epoch 652/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 653/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 654/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 655/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 656/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 657/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 658/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 659/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 660/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 661/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00661: ReduceLROnPlateau reducing learning rate to 2.4394547830792213e-22.
Epoch 662/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 663/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 664/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 665/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 666/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 667/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 668/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 669/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 670/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 671/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00671: ReduceLROnPlateau reducing learning rate to 1.2197273915396106e-22.
Epoch 672/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 673/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 674/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 675/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 676/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 677/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 678/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 679/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 680/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 681/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00681: ReduceLROnPlateau reducing learning rate to 6.098636957698053e-23.
Epoch 682/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 683/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 684/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 685/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 686/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 687/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 688/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 689/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 690/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 691/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00691: ReduceLROnPlateau reducing learning rate to 3.0493184788490266e-23.
Epoch 692/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 693/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 694/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 695/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 696/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 697/1000
162/162 [==============================] - 0s 93us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 698/1000
162/162 [==============================] - 0s 93us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 699/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 700/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 701/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00701: ReduceLROnPlateau reducing learning rate to 1.5246592394245133e-23.
Epoch 702/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 703/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 704/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 705/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 706/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 707/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 708/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 709/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 710/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 711/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00711: ReduceLROnPlateau reducing learning rate to 7.623296197122566e-24.
Epoch 712/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 713/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 714/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 715/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 716/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 717/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 718/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 719/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 720/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 721/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00721: ReduceLROnPlateau reducing learning rate to 3.811648098561283e-24.
Epoch 722/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 723/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 724/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 725/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 726/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 727/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 728/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 729/1000
162/162 [==============================] - 0s 93us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 730/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 731/1000
162/162 [==============================] - 0s 93us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00731: ReduceLROnPlateau reducing learning rate to 1.9058240492806416e-24.
Epoch 732/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 733/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 734/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 735/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 736/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 737/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 738/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 739/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 740/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 741/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00741: ReduceLROnPlateau reducing learning rate to 9.529120246403208e-25.
Epoch 742/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 743/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 744/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 745/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 746/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 747/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 748/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 749/1000
162/162 [==============================] - 0s 93us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 750/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 751/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00751: ReduceLROnPlateau reducing learning rate to 4.764560123201604e-25.
Epoch 752/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 753/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 754/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 755/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 756/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 757/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 758/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 759/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 760/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 761/1000
162/162 [==============================] - 0s 142us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00761: ReduceLROnPlateau reducing learning rate to 2.382280061600802e-25.
Epoch 762/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 763/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 764/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 765/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 766/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 767/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 768/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 769/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 770/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 771/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00771: ReduceLROnPlateau reducing learning rate to 1.191140030800401e-25.
Epoch 772/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 773/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 774/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 775/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 776/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 777/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 778/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 779/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 780/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 781/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00781: ReduceLROnPlateau reducing learning rate to 5.955700154002005e-26.
Epoch 782/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 783/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 784/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 785/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 786/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 787/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 788/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 789/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 790/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 791/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00791: ReduceLROnPlateau reducing learning rate to 2.9778500770010025e-26.
Epoch 792/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 793/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 794/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 795/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 796/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 797/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 798/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 799/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 800/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 801/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00801: ReduceLROnPlateau reducing learning rate to 1.4889250385005013e-26.
Epoch 802/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 803/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 804/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 805/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 806/1000
162/162 [==============================] - 0s 93us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 807/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 808/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 809/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 810/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 811/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00811: ReduceLROnPlateau reducing learning rate to 7.444625192502506e-27.
Epoch 812/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 813/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 814/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 815/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 816/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 817/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 818/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 819/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 820/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 821/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00821: ReduceLROnPlateau reducing learning rate to 3.722312596251253e-27.
Epoch 822/1000
162/162 [==============================] - 0s 93us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 823/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 824/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 825/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 826/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 827/1000
162/162 [==============================] - 0s 142us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 828/1000
162/162 [==============================] - 0s 142us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 829/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 830/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 831/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00831: ReduceLROnPlateau reducing learning rate to 1.8611562981256266e-27.
Epoch 832/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 833/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 834/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 835/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 836/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 837/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 838/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 839/1000
162/162 [==============================] - 0s 148us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 840/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 841/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00841: ReduceLROnPlateau reducing learning rate to 9.305781490628133e-28.
Epoch 842/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 843/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 844/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 845/1000
162/162 [==============================] - 0s 148us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 846/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 847/1000
162/162 [==============================] - 0s 148us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 848/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 849/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 850/1000
162/162 [==============================] - 0s 148us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 851/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00851: ReduceLROnPlateau reducing learning rate to 4.6528907453140665e-28.
Epoch 852/1000
162/162 [==============================] - 0s 142us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 853/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 854/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 855/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 856/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 857/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 858/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 859/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 860/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 861/1000
162/162 [==============================] - 0s 142us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00861: ReduceLROnPlateau reducing learning rate to 2.3264453726570332e-28.
Epoch 862/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 863/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 864/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 865/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 866/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 867/1000
162/162 [==============================] - 0s 148us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 868/1000
162/162 [==============================] - 0s 148us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 869/1000
162/162 [==============================] - 0s 148us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 870/1000
162/162 [==============================] - 0s 142us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 871/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00871: ReduceLROnPlateau reducing learning rate to 1.1632226863285166e-28.
Epoch 872/1000
162/162 [==============================] - 0s 148us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 873/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 874/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 875/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 876/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 877/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 878/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 879/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 880/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 881/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00881: ReduceLROnPlateau reducing learning rate to 5.816113431642583e-29.
Epoch 882/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 883/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 884/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 885/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 886/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 887/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 888/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 889/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 890/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 891/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00891: ReduceLROnPlateau reducing learning rate to 2.9080567158212915e-29.
Epoch 892/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 893/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 894/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 895/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 896/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 897/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 898/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 899/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 900/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 901/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00901: ReduceLROnPlateau reducing learning rate to 1.4540283579106458e-29.
Epoch 902/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 903/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 904/1000
162/162 [==============================] - 0s 148us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 905/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 906/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 907/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 908/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 909/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 910/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 911/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00911: ReduceLROnPlateau reducing learning rate to 7.270141789553229e-30.
Epoch 912/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 913/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 914/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 915/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 916/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 917/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 918/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 919/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 920/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 921/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00921: ReduceLROnPlateau reducing learning rate to 3.6350708947766144e-30.
Epoch 922/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 923/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 924/1000
162/162 [==============================] - 0s 160us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 925/1000
162/162 [==============================] - 0s 148us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 926/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 927/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 928/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 929/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 930/1000
162/162 [==============================] - 0s 148us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 931/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00931: ReduceLROnPlateau reducing learning rate to 1.8175354473883072e-30.
Epoch 932/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 933/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 934/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 935/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 936/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 937/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 938/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 939/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 940/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 941/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00941: ReduceLROnPlateau reducing learning rate to 9.087677236941536e-31.
Epoch 942/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 943/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 944/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 945/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 946/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 947/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 948/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 949/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 950/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 951/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00951: ReduceLROnPlateau reducing learning rate to 4.543838618470768e-31.
Epoch 952/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 953/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 954/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 955/1000
162/162 [==============================] - 0s 148us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 956/1000
162/162 [==============================] - 0s 136us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 957/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 958/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 959/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 960/1000
162/162 [==============================] - 0s 130us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 961/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00961: ReduceLROnPlateau reducing learning rate to 2.271919309235384e-31.
Epoch 962/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 963/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 964/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 965/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 966/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 967/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 968/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 969/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 970/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 971/1000
162/162 [==============================] - ETA: 0s - loss: 0.6282 - accuracy: 0.56 - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00971: ReduceLROnPlateau reducing learning rate to 1.135959654617692e-31.
Epoch 972/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 973/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 974/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 975/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 976/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 977/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 978/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 979/1000
162/162 [==============================] - 0s 117us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 980/1000
162/162 [==============================] - 0s 123us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 981/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00981: ReduceLROnPlateau reducing learning rate to 5.67979827308846e-32.
Epoch 982/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 983/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 984/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 985/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 986/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 987/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 988/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 989/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 990/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 991/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037

Epoch 00991: ReduceLROnPlateau reducing learning rate to 2.83989913654423e-32.
Epoch 992/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 993/1000
162/162 [==============================] - 0s 111us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 994/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 995/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 996/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 997/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 998/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 999/1000
162/162 [==============================] - 0s 105us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
Epoch 1000/1000
162/162 [==============================] - 0s 99us/step - loss: 0.5800 - accuracy: 0.6605 - val_loss: 0.5577 - val_accuracy: 0.7037
In [127]:
# Plot the training history: accuracy and loss curves, training vs. validation.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# One x-value per recorded epoch.
epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
In [128]:
# Evaluate the trained model on the held-out test split and report both metrics.
evaluation = model.evaluate(X_test, y_test)
test_loss, test_acc = evaluation
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
54/54 [==============================] - 0s 74us/step
test loss: 0.5576540055098357, test accuracy: 0.7037037014961243
In [129]:
# Score the test-set predicted probabilities with ROC AUC (threshold-independent).
y_pred = model.predict(X_test)
auc = roc_auc_score(y_test, y_pred)
print("AUC ROC: ", auc)
AUC ROC:  0.6190476190476191
In [130]:
# Binarize the sigmoid probabilities at the conventional 0.5 threshold.
# Vectorized with numpy instead of a per-element lambda: the original called
# int() on the length-1 arrays that map() yields for an (n, 1) prediction
# matrix, which is deprecated and removed in recent numpy versions.
# np.ravel handles both (n, 1) and (n,) prediction shapes.
y_pred = np.ravel(np.asarray(y_pred) >= 0.5).astype(int).tolist()
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
Kappa:  0.08860759493670889
[[35  7]
 [ 9  3]]

KMeans

In [207]:
X
Out[207]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13
0 -1.420085 -0.330086 0.476982 0.852458 -0.881089 -0.037777 -0.607746 -0.070107 -0.654790 -0.418599 -0.802899 -0.351189 1.002024
1 -0.288523 -1.545259 -1.137074 1.113906 0.311278 -1.566031 -2.038457 -0.322607 -0.291288 1.094837 0.274704 1.777243 2.309084
2 -0.424115 0.410085 0.838888 0.219947 -0.375953 -0.789691 -0.335746 -0.306896 -0.962051 0.861800 -0.453581 0.612777 0.646240
3 -0.436131 -1.584784 0.658995 0.766397 -1.275956 1.369786 1.180080 -2.004124 -0.366360 0.460536 -0.946577 0.167472 0.641078
4 0.000543 0.507984 -0.978397 -0.501031 0.347848 0.605158 0.571957 -0.261476 0.046623 -0.176286 0.124656 0.069963 -0.657263
5 0.113948 -0.640675 -1.529179 -1.089360 0.638256 1.024606 0.805429 0.597420 -0.503707 -0.866724 0.651241 1.674008 0.860657
6 0.807950 -1.039887 2.968459 1.111222 0.521703 3.834393 -0.154811 -0.070639 -0.395759 -0.640282 -0.725587 -0.334158 -0.618257
7 -0.465670 0.173616 0.264449 2.278838 1.672258 0.689505 1.154324 1.417257 1.035726 0.390329 -1.217981 -1.123742 0.066049
8 0.235109 -0.290452 -0.928556 0.659411 -0.320465 0.170612 0.154061 0.895453 0.758427 -0.408150 -1.056703 0.829210 2.099893
9 0.296153 1.351512 -0.018047 0.386276 -0.911469 0.796022 -0.567061 -0.077659 0.096104 -0.635810 -0.597365 -1.005655 0.087116
10 1.304720 0.208245 0.583743 1.972234 -0.030334 0.162684 -0.208981 0.601801 -1.515513 -3.719401 -0.895641 0.233657 -1.060053
11 1.288979 -0.851636 -0.002189 -0.753502 -0.288197 -1.486411 -0.866242 0.068441 -0.112261 0.084143 0.404865 -1.121813 0.068799
12 0.802167 1.125872 0.181614 -0.054998 0.125978 -0.118093 -0.204100 -0.240702 0.725047 -0.060416 0.610221 0.097037 0.758946
13 -0.570677 -0.903477 0.077307 1.133679 -0.194704 -2.680079 -1.869617 0.413585 -0.778149 0.810256 0.538818 1.572772 2.002755
14 0.390585 1.185091 1.060521 -0.143387 0.154017 -0.184047 -0.178747 -1.592611 0.195876 0.676819 1.052625 -0.193902 0.538474
15 -0.294008 1.505226 0.525191 0.408188 0.012660 0.846051 0.444150 -0.303052 0.701059 0.270748 -0.345435 -0.554816 -0.663403
16 0.504074 0.589989 0.264178 -0.853628 -1.595569 -0.010760 0.220518 0.048998 0.250424 0.796887 -0.099404 -1.017561 -0.397998
17 -0.894319 -1.371343 -0.705746 0.481594 -0.141646 -0.042173 -0.245037 0.348273 0.688880 -0.027577 -0.865534 -1.210148 0.271172
18 0.851995 0.048612 0.066938 0.223581 -0.911164 -1.152240 -0.070054 -0.416772 -0.480978 -0.106632 -0.231062 -0.850191 0.896367
19 -0.660027 -0.598339 -0.142295 1.087024 -2.982385 -0.973575 0.199468 -2.163834 -0.508427 1.292035 -1.534447 -0.433825 0.590292
20 -0.566037 -2.086297 0.061463 -0.482666 -1.080805 0.084488 -0.543972 0.386352 -0.614930 -0.730628 -0.994692 0.783494 0.132430
21 1.221627 -0.027554 0.188106 -0.974894 0.005596 1.102640 -0.048800 -0.490088 -1.842707 0.102032 0.157223 0.601727 1.366630
22 -1.183193 -0.312675 -0.568276 -0.639410 -0.208614 0.261554 -0.216256 0.261899 0.331089 0.345426 0.485643 0.504079 0.397818
23 -0.801169 0.116605 -0.579690 0.362276 0.606309 -0.570757 -1.548593 0.718826 2.916797 2.075668 0.518297 -0.400812 -0.252481
24 -0.891340 0.309727 -0.842676 0.310338 0.577762 -0.334300 -1.473445 0.513897 2.805987 2.062959 0.635321 -0.248494 -0.258253
25 -0.130263 1.334103 0.730042 0.678688 -0.720646 -0.666357 0.000279 -0.436292 0.339978 1.010170 0.805445 0.781825 -0.399769
26 -1.602028 1.309495 0.611484 0.700892 -0.666054 -0.863765 -0.615702 -0.509079 0.601651 0.871639 -0.301423 -1.640656 -0.131728
27 -0.525208 1.525178 -0.030851 -0.074993 0.453039 1.307113 0.452591 -0.220734 0.853076 0.349373 -0.297713 0.168680 -0.091233
28 0.009250 -0.578236 -0.700373 -1.085258 -0.295803 -0.883641 2.594736 0.321904 0.390719 -1.529163 -0.511611 -1.805971 0.998148
29 0.280166 -0.217504 -2.181350 0.187090 0.622591 -2.686573 -0.214488 1.438992 1.094095 -1.319265 0.002128 -3.970580 -0.678777
... ... ... ... ... ... ... ... ... ... ... ... ... ...
186 -0.713223 -0.165097 0.170953 -0.307601 1.152684 -0.045434 -0.432254 0.388447 -0.400713 -0.024795 -0.444954 0.651175 0.856965
187 0.077144 -0.527913 0.300841 -0.007145 -0.844229 -0.260791 -1.196957 0.080170 0.801803 0.095003 -0.099609 0.698672 -0.673575
188 -0.818527 -0.256716 0.589448 -0.243361 0.027958 0.218148 0.278190 0.579538 0.547728 1.017426 0.085377 -1.012860 -1.117093
189 0.078094 -0.693228 -0.177029 0.143179 -2.181764 -1.150077 -0.455986 2.342589 0.559829 -0.323766 1.119820 0.558999 1.029247
190 0.473118 -0.619480 -0.613859 -1.390025 -2.181316 -1.933866 -1.862714 2.547096 0.230172 -1.472410 0.795467 0.361148 0.443935
191 -0.725236 0.975099 1.683062 -0.427378 1.353092 -0.378540 0.888469 0.944767 0.523458 -0.783620 0.384682 0.536515 0.242834
192 -0.098130 0.349984 0.651382 0.850819 0.452135 0.155512 0.327726 0.884508 -0.713577 -0.090647 0.810323 0.862330 0.315889
193 -0.629175 0.598836 0.560497 0.331765 0.692832 0.040925 0.021748 -0.346042 -0.072500 -0.054767 1.405269 0.652914 0.119071
194 -0.593283 -1.762364 -1.292266 -0.014741 0.048404 1.051485 1.378317 0.769360 -0.538726 -0.881937 -0.734744 -0.034792 0.551083
195 -0.220725 -0.706286 -0.558429 -0.543495 -0.762451 -0.724549 0.033926 0.427916 0.381670 0.691530 1.157624 0.554176 0.085881
196 0.542442 -0.314573 -0.389836 1.340826 -0.685860 1.357357 -0.180731 -0.134883 1.542849 -0.544367 -1.675576 0.661188 -1.008124
197 0.881881 0.686691 1.412427 0.067865 -0.239689 0.560162 0.252073 -1.596612 0.028578 0.915895 -0.464715 0.139984 0.221738
198 0.056082 0.354969 -0.320369 -0.059290 -0.029903 -0.037445 0.463365 1.036375 -0.996880 -0.421318 -0.105980 0.153928 -0.138210
199 -0.069357 0.007801 -0.207830 -0.057174 -0.226654 0.215090 -0.377980 0.770378 0.045795 0.749183 -0.041079 0.507759 0.857287
200 0.469206 -0.809604 -0.887115 -0.746687 -1.496004 0.062379 -0.191488 2.511061 0.807064 1.982195 -1.624512 1.054232 -1.213713
201 -0.020242 -0.015071 -0.121820 -0.371781 -0.501178 0.021533 -0.183053 0.947258 0.121773 0.744517 -0.020175 0.696027 1.176563
202 -0.850319 -0.167766 1.753294 -0.158230 1.464227 -1.912449 0.756675 -1.829681 1.018012 0.323314 -1.687154 0.771247 -1.800694
203 -1.592416 0.518318 -0.040842 -0.026786 0.407023 0.751642 0.136831 -0.519756 -0.647307 -0.663112 -0.108425 -0.070893 0.564588
204 -1.525605 0.408122 -0.068239 -0.027937 0.579957 0.945646 0.028783 -0.684904 -0.605690 -0.554342 0.077226 -0.032084 0.772622
205 0.460758 -0.673974 -1.175590 -0.042902 -0.354361 0.110479 -1.172032 -0.586283 0.068460 -0.294180 0.791941 0.725567 -0.557938
206 -0.654520 -0.315921 0.582093 0.860981 -2.255891 0.280515 -0.167860 -0.060341 0.774684 -0.965387 -0.272312 0.965091 -2.037667
207 -0.746404 -0.272009 0.915424 0.849620 -2.868535 0.209161 -0.279975 -0.187035 1.111337 -0.940798 -0.010679 1.676010 -2.095188
208 -0.406754 0.539656 0.644480 1.231427 0.299700 -0.877891 -0.636255 -0.696446 0.413124 0.006952 -0.652237 -0.705710 -0.605430
209 0.239265 2.028215 -0.262443 -0.154385 -0.408932 -1.139249 0.361170 -0.142219 1.390750 -0.247556 -1.030608 0.769854 0.315698
210 -0.275298 0.271105 0.428430 1.445529 -0.169781 -1.352517 -0.388072 -0.917276 -0.332714 -0.809595 -0.907645 -1.645002 -0.894372
211 0.151626 1.433998 2.658181 -0.681422 -0.210581 1.432182 0.997691 -2.562925 -0.348259 0.825118 2.785329 0.897498 1.962963
212 0.681872 0.671929 0.127597 -0.665368 0.282250 0.541495 1.003312 -0.196048 -0.380222 -1.300609 1.441472 1.382278 0.192263
213 0.706389 0.041252 -0.040976 -0.311682 -0.096455 0.762413 -0.450937 -0.134575 -0.102032 0.750741 -0.104707 -0.048909 0.682180
214 -1.588263 0.426822 -0.788645 -0.221386 0.983329 1.558246 1.332190 0.532878 -0.647525 -0.023726 -0.959608 1.274829 2.334968
215 -0.774487 -1.852850 -0.785635 -0.149473 -0.236634 0.462963 1.000185 -0.047619 -0.310695 -0.127651 0.490345 0.233474 0.163391

216 rows × 13 columns

In [208]:
# Elbow method: within-cluster sum of squares (KMeans inertia) for k = 1..14.
# KMeans.fit returns the fitted estimator, so the comprehension reads cleanly.
WSSs = [KMeans(n_clusters=k, random_state=0).fit(X).inertia_ for k in range(1, 15)]
WSSs
Out[208]:
[2808.0,
 2548.3417429105866,
 2378.335995807065,
 2256.0960796172503,
 2169.4597299478096,
 2080.2739448933166,
 2001.1226520766459,
 1918.5739069931988,
 1880.9859014602027,
 1796.9350644070514,
 1787.591551933858,
 1721.2471823270262,
 1689.3458120683304,
 1657.5154964943713]
In [209]:
# Elbow plot of the inertia curve computed above; the bend suggests k.
# Labels added and plt.show() used so the cell does not also print the
# bare [<Line2D ...>] repr.
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)
plt.xlabel('Number of clusters k')
plt.ylabel('Within-cluster sum of squares (inertia)')
plt.title('Elbow method for KMeans')
plt.show()
Out[209]:
[<matplotlib.lines.Line2D at 0x1b8293108d0>]

K=2

In [210]:
# Fit the final 2-cluster model (k = 2 chosen from the elbow plot above).
# fit() returns the estimator, so the chained form keeps one name in scope;
# the bare name on the last line reproduces the estimator repr as cell output.
kmeans_mfcc = KMeans(n_clusters=2, random_state=0, n_init=10).fit(X)
kmeans_mfcc
Out[210]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=2, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [211]:
# Cluster assignment (0/1) for each row of X used in the fit above.
kmeans_mfcc.labels_
Out[211]:
array([0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0,
       1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1,
       1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1,
       1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1,
       0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0,
       1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0,
       1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0,
       0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0,
       1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1])
In [212]:
# Predict on the same X the model was fitted on, so this reproduces
# kmeans_mfcc.labels_ exactly (compare Out[211] and Out[212]); kept as a
# named variable for the column assignment in the next cell.
clusters_mfcc = kmeans_mfcc.predict(X)
clusters_mfcc
Out[212]:
array([0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0,
       1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1,
       1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1,
       1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1,
       0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0,
       1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0,
       1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0,
       0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0,
       1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1])
In [213]:
# NOTE(review): mutates X in place — after this cell X is no longer the pure
# 13-column MFCC feature matrix; any later cell expecting features only must
# rebuild X. 'chosen' is copied from y (presumably the playlist-selection
# target used earlier — verify against where y is defined).
X.loc[:,'Cluster'] = clusters_mfcc
X.loc[:,'chosen'] = list(y)
In [214]:
X
Out[214]:
mfccfiles_1 mfccfiles_2 mfccfiles_3 mfccfiles_4 mfccfiles_5 mfccfiles_6 mfccfiles_7 mfccfiles_8 mfccfiles_9 mfccfiles_10 mfccfiles_11 mfccfiles_12 mfccfiles_13 Cluster chosen
0 -1.420085 -0.330086 0.476982 0.852458 -0.881089 -0.037777 -0.607746 -0.070107 -0.654790 -0.418599 -0.802899 -0.351189 1.002024 0 0
1 -0.288523 -1.545259 -1.137074 1.113906 0.311278 -1.566031 -2.038457 -0.322607 -0.291288 1.094837 0.274704 1.777243 2.309084 0 0
2 -0.424115 0.410085 0.838888 0.219947 -0.375953 -0.789691 -0.335746 -0.306896 -0.962051 0.861800 -0.453581 0.612777 0.646240 0 0
3 -0.436131 -1.584784 0.658995 0.766397 -1.275956 1.369786 1.180080 -2.004124 -0.366360 0.460536 -0.946577 0.167472 0.641078 0 0
4 0.000543 0.507984 -0.978397 -0.501031 0.347848 0.605158 0.571957 -0.261476 0.046623 -0.176286 0.124656 0.069963 -0.657263 1 0
5 0.113948 -0.640675 -1.529179 -1.089360 0.638256 1.024606 0.805429 0.597420 -0.503707 -0.866724 0.651241 1.674008 0.860657 1 0
6 0.807950 -1.039887 2.968459 1.111222 0.521703 3.834393 -0.154811 -0.070639 -0.395759 -0.640282 -0.725587 -0.334158 -0.618257 0 0
7 -0.465670 0.173616 0.264449 2.278838 1.672258 0.689505 1.154324 1.417257 1.035726 0.390329 -1.217981 -1.123742 0.066049 0 0
8 0.235109 -0.290452 -0.928556 0.659411 -0.320465 0.170612 0.154061 0.895453 0.758427 -0.408150 -1.056703 0.829210 2.099893 1 0
9 0.296153 1.351512 -0.018047 0.386276 -0.911469 0.796022 -0.567061 -0.077659 0.096104 -0.635810 -0.597365 -1.005655 0.087116 0 0
10 1.304720 0.208245 0.583743 1.972234 -0.030334 0.162684 -0.208981 0.601801 -1.515513 -3.719401 -0.895641 0.233657 -1.060053 0 0
11 1.288979 -0.851636 -0.002189 -0.753502 -0.288197 -1.486411 -0.866242 0.068441 -0.112261 0.084143 0.404865 -1.121813 0.068799 1 0
12 0.802167 1.125872 0.181614 -0.054998 0.125978 -0.118093 -0.204100 -0.240702 0.725047 -0.060416 0.610221 0.097037 0.758946 0 0
13 -0.570677 -0.903477 0.077307 1.133679 -0.194704 -2.680079 -1.869617 0.413585 -0.778149 0.810256 0.538818 1.572772 2.002755 0 0
14 0.390585 1.185091 1.060521 -0.143387 0.154017 -0.184047 -0.178747 -1.592611 0.195876 0.676819 1.052625 -0.193902 0.538474 0 0
15 -0.294008 1.505226 0.525191 0.408188 0.012660 0.846051 0.444150 -0.303052 0.701059 0.270748 -0.345435 -0.554816 -0.663403 0 0
16 0.504074 0.589989 0.264178 -0.853628 -1.595569 -0.010760 0.220518 0.048998 0.250424 0.796887 -0.099404 -1.017561 -0.397998 1 0
17 -0.894319 -1.371343 -0.705746 0.481594 -0.141646 -0.042173 -0.245037 0.348273 0.688880 -0.027577 -0.865534 -1.210148 0.271172 1 0
18 0.851995 0.048612 0.066938 0.223581 -0.911164 -1.152240 -0.070054 -0.416772 -0.480978 -0.106632 -0.231062 -0.850191 0.896367 0 0
19 -0.660027 -0.598339 -0.142295 1.087024 -2.982385 -0.973575 0.199468 -2.163834 -0.508427 1.292035 -1.534447 -0.433825 0.590292 0 0
20 -0.566037 -2.086297 0.061463 -0.482666 -1.080805 0.084488 -0.543972 0.386352 -0.614930 -0.730628 -0.994692 0.783494 0.132430 1 0
21 1.221627 -0.027554 0.188106 -0.974894 0.005596 1.102640 -0.048800 -0.490088 -1.842707 0.102032 0.157223 0.601727 1.366630 0 0
22 -1.183193 -0.312675 -0.568276 -0.639410 -0.208614 0.261554 -0.216256 0.261899 0.331089 0.345426 0.485643 0.504079 0.397818 1 0
23 -0.801169 0.116605 -0.579690 0.362276 0.606309 -0.570757 -1.548593 0.718826 2.916797 2.075668 0.518297 -0.400812 -0.252481 1 0
24 -0.891340 0.309727 -0.842676 0.310338 0.577762 -0.334300 -1.473445 0.513897 2.805987 2.062959 0.635321 -0.248494 -0.258253 1 0
25 -0.130263 1.334103 0.730042 0.678688 -0.720646 -0.666357 0.000279 -0.436292 0.339978 1.010170 0.805445 0.781825 -0.399769 0 0
26 -1.602028 1.309495 0.611484 0.700892 -0.666054 -0.863765 -0.615702 -0.509079 0.601651 0.871639 -0.301423 -1.640656 -0.131728 0 0
27 -0.525208 1.525178 -0.030851 -0.074993 0.453039 1.307113 0.452591 -0.220734 0.853076 0.349373 -0.297713 0.168680 -0.091233 0 0
28 0.009250 -0.578236 -0.700373 -1.085258 -0.295803 -0.883641 2.594736 0.321904 0.390719 -1.529163 -0.511611 -1.805971 0.998148 1 0
29 0.280166 -0.217504 -2.181350 0.187090 0.622591 -2.686573 -0.214488 1.438992 1.094095 -1.319265 0.002128 -3.970580 -0.678777 1 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
186 -0.713223 -0.165097 0.170953 -0.307601 1.152684 -0.045434 -0.432254 0.388447 -0.400713 -0.024795 -0.444954 0.651175 0.856965 0 1
187 0.077144 -0.527913 0.300841 -0.007145 -0.844229 -0.260791 -1.196957 0.080170 0.801803 0.095003 -0.099609 0.698672 -0.673575 1 1
188 -0.818527 -0.256716 0.589448 -0.243361 0.027958 0.218148 0.278190 0.579538 0.547728 1.017426 0.085377 -1.012860 -1.117093 1 1
189 0.078094 -0.693228 -0.177029 0.143179 -2.181764 -1.150077 -0.455986 2.342589 0.559829 -0.323766 1.119820 0.558999 1.029247 1 1
190 0.473118 -0.619480 -0.613859 -1.390025 -2.181316 -1.933866 -1.862714 2.547096 0.230172 -1.472410 0.795467 0.361148 0.443935 1 1
191 -0.725236 0.975099 1.683062 -0.427378 1.353092 -0.378540 0.888469 0.944767 0.523458 -0.783620 0.384682 0.536515 0.242834 0 1
192 -0.098130 0.349984 0.651382 0.850819 0.452135 0.155512 0.327726 0.884508 -0.713577 -0.090647 0.810323 0.862330 0.315889 0 1
193 -0.629175 0.598836 0.560497 0.331765 0.692832 0.040925 0.021748 -0.346042 -0.072500 -0.054767 1.405269 0.652914 0.119071 0 1
194 -0.593283 -1.762364 -1.292266 -0.014741 0.048404 1.051485 1.378317 0.769360 -0.538726 -0.881937 -0.734744 -0.034792 0.551083 1 1
195 -0.220725 -0.706286 -0.558429 -0.543495 -0.762451 -0.724549 0.033926 0.427916 0.381670 0.691530 1.157624 0.554176 0.085881 1 1
196 0.542442 -0.314573 -0.389836 1.340826 -0.685860 1.357357 -0.180731 -0.134883 1.542849 -0.544367 -1.675576 0.661188 -1.008124 1 1
197 0.881881 0.686691 1.412427 0.067865 -0.239689 0.560162 0.252073 -1.596612 0.028578 0.915895 -0.464715 0.139984 0.221738 0 1
198 0.056082 0.354969 -0.320369 -0.059290 -0.029903 -0.037445 0.463365 1.036375 -0.996880 -0.421318 -0.105980 0.153928 -0.138210 1 1
199 -0.069357 0.007801 -0.207830 -0.057174 -0.226654 0.215090 -0.377980 0.770378 0.045795 0.749183 -0.041079 0.507759 0.857287 0 1
200 0.469206 -0.809604 -0.887115 -0.746687 -1.496004 0.062379 -0.191488 2.511061 0.807064 1.982195 -1.624512 1.054232 -1.213713 1 1
201 -0.020242 -0.015071 -0.121820 -0.371781 -0.501178 0.021533 -0.183053 0.947258 0.121773 0.744517 -0.020175 0.696027 1.176563 1 1
202 -0.850319 -0.167766 1.753294 -0.158230 1.464227 -1.912449 0.756675 -1.829681 1.018012 0.323314 -1.687154 0.771247 -1.800694 0 1
203 -1.592416 0.518318 -0.040842 -0.026786 0.407023 0.751642 0.136831 -0.519756 -0.647307 -0.663112 -0.108425 -0.070893 0.564588 0 1
204 -1.525605 0.408122 -0.068239 -0.027937 0.579957 0.945646 0.028783 -0.684904 -0.605690 -0.554342 0.077226 -0.032084 0.772622 0 1
205 0.460758 -0.673974 -1.175590 -0.042902 -0.354361 0.110479 -1.172032 -0.586283 0.068460 -0.294180 0.791941 0.725567 -0.557938 1 1
206 -0.654520 -0.315921 0.582093 0.860981 -2.255891 0.280515 -0.167860 -0.060341 0.774684 -0.965387 -0.272312 0.965091 -2.037667 1 1
207 -0.746404 -0.272009 0.915424 0.849620 -2.868535 0.209161 -0.279975 -0.187035 1.111337 -0.940798 -0.010679 1.676010 -2.095188 1 1
208 -0.406754 0.539656 0.644480 1.231427 0.299700 -0.877891 -0.636255 -0.696446 0.413124 0.006952 -0.652237 -0.705710 -0.605430 0 1
209 0.239265 2.028215 -0.262443 -0.154385 -0.408932 -1.139249 0.361170 -0.142219 1.390750 -0.247556 -1.030608 0.769854 0.315698 0 1
210 -0.275298 0.271105 0.428430 1.445529 -0.169781 -1.352517 -0.388072 -0.917276 -0.332714 -0.809595 -0.907645 -1.645002 -0.894372 0 1
211 0.151626 1.433998 2.658181 -0.681422 -0.210581 1.432182 0.997691 -2.562925 -0.348259 0.825118 2.785329 0.897498 1.962963 0 1
212 0.681872 0.671929 0.127597 -0.665368 0.282250 0.541495 1.003312 -0.196048 -0.380222 -1.300609 1.441472 1.382278 0.192263 0 1
213 0.706389 0.041252 -0.040976 -0.311682 -0.096455 0.762413 -0.450937 -0.134575 -0.102032 0.750741 -0.104707 -0.048909 0.682180 0 1
214 -1.588263 0.426822 -0.788645 -0.221386 0.983329 1.558246 1.332190 0.532878 -0.647525 -0.023726 -0.959608 1.274829 2.334968 0 1
215 -0.774487 -1.852850 -0.785635 -0.149473 -0.236634 0.462963 1.000185 -0.047619 -0.310695 -0.127651 0.490345 0.233474 0.163391 1 1

216 rows × 15 columns

In [215]:
# Cross-tabulate cluster membership against the 'chosen' label and draw a
# stacked bar chart: one bar per cluster, split by chosen == 0 / 1.
stacked = X.groupby(['chosen','Cluster']).size().reset_index()
# pivot: rows = Cluster, columns = chosen; the group sizes land in the
# column literally named 0 by reset_index().
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(10,7))
Out[215]:
<matplotlib.axes._subplots.AxesSubplot at 0x1b829357fd0>
In [252]:
import itertools

from sklearn.metrics import confusion_matrix

Combinations

In [ ]:
# For every pair of companies: pool their playlists, grid-search an sklearn
# MLP over activation / depth / learning rate / iteration budget, then rebuild
# the best configuration as a Keras model and report test metrics.
# NOTE(review): near-duplicate of the In[253] cell below — candidate for a
# shared function.  Assumes df_n_ps_std_mfcc / df_n_ps are per-company frames
# indexed by company position — TODO confirm against where they are built.
two_combs = list(itertools.combinations(range(len(companies)), 2))
for comb in two_combs:
    # Section header for this company pair ('y' = Spanish "and").
    print('## '+companies[comb[0]].upper()+' y '+companies[comb[1]].upper())
    X = df_n_ps_std_mfcc[comb[0]].append(df_n_ps_std_mfcc[comb[1]])
    y = df_n_ps[comb[0]]['chosen'].append(df_n_ps[comb[1]]['chosen'])

    print(X.head())
    X_train, X_test, y_train, y_test = train_test_split(X, y)

    # Base estimator; hidden_layer_sizes here is overridden by the grid below.
    mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))

    # Hyperparameter candidates for the grid search.
    activation_vec = ['logistic', 'relu', 'tanh']
    max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
    hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                              (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
    learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
    # batch_size_vec is defined but excluded from the grid (see 'parametros').
    batch_size_vec = [10, 20, 40, 60, 80, 100, 150]


    # Fixed seed so the search is repeatable per pair.
    np.random.seed(1234)
    parametros = {'activation': activation_vec,
                  'max_iter':max_iter_vec,
                  'hidden_layer_sizes': hidden_layer_sizes_vec,
                  'learning_rate_init': learning_rate_init_vec#,
                  #'batch_size': batch_size_vec
                  }
    # Track both kappa and accuracy, but refit the best model on accuracy.
    scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
    grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)

    grid.fit(X_train, y_train)

    # Report best params, CV accuracy (%) and the matching mean CV kappa (%).
    print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
        grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))

    # Rebuild the winning architecture in Keras: input width n0, the tuned
    # hidden layer widths, then a single sigmoid output unit.
    n0=X_train.shape[1]
    ### hidden_layer_sizes
    ns = []
    for i in range (len(grid.best_params_['hidden_layer_sizes'])):
        ns.append(grid.best_params_['hidden_layer_sizes'][i])

    ns.append(1)
    lr = grid.best_params_['learning_rate_init']
    epochs = grid.best_params_['max_iter']
    # Map sklearn's activation name to the Keras equivalent.
    if grid.best_params_['activation'] == 'logistic':
        grid.best_params_['activation'] = 'sigmoid'
    
    # Functional-API chain: each Dense consumes the previous layer's output.
    input_tensor = Input(shape = (n0,))
    hidden_outputs = [input_tensor]
    for i in range (len(ns)-1):
        hidden_outputs.append(Dense(ns[i], activation = grid.best_params_['activation'])(hidden_outputs[i]))
    classification_output = Dense(ns[-1], activation = 'sigmoid')(hidden_outputs[-1])

    model = Model([input_tensor], [classification_output])
    # NOTE(review): saving and immediately restoring the freshly-initialized
    # weights is a no-op as written.
    weights = model.get_weights()

    print(model.summary())

    model.set_weights(weights)
    adam = keras.optimizers.Adam(lr=lr)
    model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
    # Train for the grid-selected iteration budget, halving the LR when
    # validation accuracy plateaus; verbose=0 keeps the cell output readable.
    history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), verbose=0, 
                callbacks=[
                    keras.callbacks.ReduceLROnPlateau(
                        monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=0
                    )
                ]
             )

    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']
    loss = history.history['loss']
    val_loss = history.history['val_loss']

    # NOTE(review): dead assignment — rebinds the integer 'epochs' to a range
    # that is never used below in this cell.
    epochs = range(len(acc))

    print("epochs: "+str(len(acc)))

    # Held-out metrics: loss/accuracy, AUC on probabilities, then kappa and
    # the confusion matrix on 0.5-thresholded classes.
    test_loss, test_acc = model.evaluate(X_test, y_test)
    print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))

    y_pred = model.predict(X_test)
    print("AUC ROC: ",roc_auc_score(y_test, y_pred))

    y_pred = list(map(lambda i: int(i>=0.5), y_pred))
    print("Kappa: ",cohen_kappa_score(y_test, y_pred))

    print(confusion_matrix(y_test, y_pred))
## ARTE FRANCÉS y CLUB DE BANQUEROS Y EMPRESARIOS
   mfccfiles_1  mfccfiles_2  mfccfiles_3  mfccfiles_4  mfccfiles_5  \
0     0.297583     1.225637    -0.367641     0.606499     0.072373   
1     0.637676    -1.507256    -1.572737    -0.954161    -0.857425   
2     2.236730    -0.319414     0.669910    -1.918119    -0.820882   
3     0.662077    -0.381499     0.111981    -1.743808    -1.317593   
4     0.736502     0.112932    -0.065024    -1.049458    -0.408043   

   mfccfiles_6  mfccfiles_7  mfccfiles_8  mfccfiles_9  mfccfiles_10  \
0    -2.029620     0.791469     0.752018     2.268802     -1.383289   
1     0.327005     0.816764     0.214245     0.241703      0.637066   
2    -2.379333    -1.570021    -2.755344    -2.150610     -2.528577   
3    -1.348534    -0.627198    -1.629882    -2.075974     -1.248765   
4    -0.437499     0.090831    -0.852983    -1.922491     -0.284365   

   mfccfiles_11  mfccfiles_12  mfccfiles_13  
0      0.548279      1.903211     -1.011470  
1      1.601538      0.300317     -0.466779  
2     -0.877081     -0.522248     -1.429911  
3     -1.126014     -1.316359     -1.126174  
4      0.210624     -0.032122     -0.700183  
Los parámetros del mejor modelo fueron {'activation': 'relu', 'hidden_layer_sizes': (30,), 'learning_rate_init': 0.008, 'max_iter': 2000}, que permiten obtener un Accuracy de 78.22% y un Kappa del 43.55
Model: "model_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_1 (InputLayer)         (None, 13)                0         
_________________________________________________________________
dense_1 (Dense)              (None, 30)                420       
_________________________________________________________________
dense_2 (Dense)              (None, 1)                 31        
=================================================================
Total params: 451
Trainable params: 451
Non-trainable params: 0
_________________________________________________________________
None
epochs: 2000
143/143 [==============================] - 0s 42us/step
test loss: 0.8208914674245394, test accuracy: 0.6643356680870056
AUC ROC:  0.6193208990913438
Kappa:  0.16719242902208198
[[79 23]
 [25 16]]
## ARTE FRANCÉS y GRAMMA
   mfccfiles_1  mfccfiles_2  mfccfiles_3  mfccfiles_4  mfccfiles_5  \
0     0.297583     1.225637    -0.367641     0.606499     0.072373   
1     0.637676    -1.507256    -1.572737    -0.954161    -0.857425   
2     2.236730    -0.319414     0.669910    -1.918119    -0.820882   
3     0.662077    -0.381499     0.111981    -1.743808    -1.317593   
4     0.736502     0.112932    -0.065024    -1.049458    -0.408043   

   mfccfiles_6  mfccfiles_7  mfccfiles_8  mfccfiles_9  mfccfiles_10  \
0    -2.029620     0.791469     0.752018     2.268802     -1.383289   
1     0.327005     0.816764     0.214245     0.241703      0.637066   
2    -2.379333    -1.570021    -2.755344    -2.150610     -2.528577   
3    -1.348534    -0.627198    -1.629882    -2.075974     -1.248765   
4    -0.437499     0.090831    -0.852983    -1.922491     -0.284365   

   mfccfiles_11  mfccfiles_12  mfccfiles_13  
0      0.548279      1.903211     -1.011470  
1      1.601538      0.300317     -0.466779  
2     -0.877081     -0.522248     -1.429911  
3     -1.126014     -1.316359     -1.126174  
4      0.210624     -0.032122     -0.700183  
C:\ProgramData\Anaconda3\lib\site-packages\sklearn\neural_network\multilayer_perceptron.py:564: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (20) reached and the optimization hasn't converged yet.
  % self.max_iter, ConvergenceWarning)
Los parámetros del mejor modelo fueron {'activation': 'relu', 'hidden_layer_sizes': (30, 20, 10), 'learning_rate_init': 0.008, 'max_iter': 20}, que permiten obtener un Accuracy de 73.87% y un Kappa del 21.99
Model: "model_2"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_2 (InputLayer)         (None, 13)                0         
_________________________________________________________________
dense_3 (Dense)              (None, 30)                420       
_________________________________________________________________
dense_4 (Dense)              (None, 20)                620       
_________________________________________________________________
dense_5 (Dense)              (None, 10)                210       
_________________________________________________________________
dense_6 (Dense)              (None, 1)                 11        
=================================================================
Total params: 1,261
Trainable params: 1,261
Non-trainable params: 0
_________________________________________________________________
None
epochs: 20
133/133 [==============================] - 0s 68us/step
test loss: 0.9437371667166402, test accuracy: 0.6992481350898743
AUC ROC:  0.6608910891089109
Kappa:  0.22651933701657456
[[78 23]
 [17 15]]
## ARTE FRANCÉS y HOTEL MARRAKECH
   mfccfiles_1  mfccfiles_2  mfccfiles_3  mfccfiles_4  mfccfiles_5  \
0     0.297583     1.225637    -0.367641     0.606499     0.072373   
1     0.637676    -1.507256    -1.572737    -0.954161    -0.857425   
2     2.236730    -0.319414     0.669910    -1.918119    -0.820882   
3     0.662077    -0.381499     0.111981    -1.743808    -1.317593   
4     0.736502     0.112932    -0.065024    -1.049458    -0.408043   

   mfccfiles_6  mfccfiles_7  mfccfiles_8  mfccfiles_9  mfccfiles_10  \
0    -2.029620     0.791469     0.752018     2.268802     -1.383289   
1     0.327005     0.816764     0.214245     0.241703      0.637066   
2    -2.379333    -1.570021    -2.755344    -2.150610     -2.528577   
3    -1.348534    -0.627198    -1.629882    -2.075974     -1.248765   
4    -0.437499     0.090831    -0.852983    -1.922491     -0.284365   

   mfccfiles_11  mfccfiles_12  mfccfiles_13  
0      0.548279      1.903211     -1.011470  
1      1.601538      0.300317     -0.466779  
2     -0.877081     -0.522248     -1.429911  
3     -1.126014     -1.316359     -1.126174  
4      0.210624     -0.032122     -0.700183  
In [253]:
from IPython.display import display, Markdown, Latex

# For every remaining pair of companies (combinations from index 2 onward),
# grid-search a scikit-learn MLP, then rebuild the winning configuration as a
# Keras model and report test loss/accuracy, AUC ROC, Cohen's kappa and the
# confusion matrix.
two_combs = list(itertools.combinations(range(len(companies)), 2))
for comb in two_combs[2:]:
    display(Markdown('## '+companies[comb[0]]+' y '+companies[comb[1]]))
    # Stack the standardized MFCC features (X) and 'chosen' labels (y) of the
    # two companies in the pair.
    X = df_n_ps_std_mfcc[comb[0]].append(df_n_ps_std_mfcc[comb[1]])
    y = df_n_ps[comb[0]]['chosen'].append(df_n_ps[comb[1]]['chosen'])

    print(X.head())
    X_train, X_test, y_train, y_test = train_test_split(X, y)

    mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))

    # Hyper-parameter grid for the scikit-learn search.
    activation_vec = ['logistic', 'relu', 'tanh']
    max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
    hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10),
                              (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
    learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
    batch_size_vec = [10, 20, 40, 60, 80, 100, 150]  # currently excluded from the grid (see below)

    np.random.seed(1234)  # reproducible CV search
    parametros = {'activation': activation_vec,
                  'max_iter': max_iter_vec,
                  'hidden_layer_sizes': hidden_layer_sizes_vec,
                  'learning_rate_init': learning_rate_init_vec#,
                  #'batch_size': batch_size_vec
                  }
    # Track both kappa and accuracy; refit the best-accuracy model.
    scoring = {'kappa': make_scorer(cohen_kappa_score), 'accuracy': 'accuracy'}
    grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)

    grid.fit(X_train, y_train)

    print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
        grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))

    # --- Rebuild the winning architecture in Keras --------------------------
    n0 = X_train.shape[1]  # number of input features
    best = grid.best_params_
    # Hidden layer widths from the search, plus the single output neuron.
    ns = list(best['hidden_layer_sizes']) + [1]
    lr = best['learning_rate_init']
    epochs = best['max_iter']

    # scikit-learn names the logistic activation 'logistic'; Keras calls it
    # 'sigmoid'. Use a local variable rather than mutating best_params_.
    activation = 'sigmoid' if best['activation'] == 'logistic' else best['activation']

    input_tensor = Input(shape=(n0,))
    hidden_outputs = [input_tensor]
    # Chain the hidden Dense layers; the final (output) size is handled below.
    for i in range(len(ns)-1):
        hidden_outputs.append(Dense(ns[i], activation=activation)(hidden_outputs[i]))
    classification_output = Dense(ns[-1], activation='sigmoid')(hidden_outputs[-1])

    model = Model([input_tensor], [classification_output])

    print(model.summary())

    adam = keras.optimizers.Adam(lr=lr)
    model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
    # Halve the learning rate when validation accuracy plateaus.
    history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), verbose=0,
                callbacks=[
                    keras.callbacks.ReduceLROnPlateau(
                        monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=0
                    )
                ]
             )

    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']  # retained for optional learning-curve plots
    loss = history.history['loss']
    val_loss = history.history['val_loss']

    print("epochs: "+str(len(acc)))

    test_loss, test_acc = model.evaluate(X_test, y_test)
    print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))

    # AUC ROC uses the raw predicted probabilities.
    y_pred = model.predict(X_test)
    print("AUC ROC: ", roc_auc_score(y_test, y_pred))

    # Kappa and the confusion matrix need hard 0/1 labels (threshold 0.5).
    y_pred = list(map(lambda i: int(i >= 0.5), y_pred))
    print("Kappa: ", cohen_kappa_score(y_test, y_pred))

    print(confusion_matrix(y_test, y_pred))
## ARTE FRANCÉS y HOTEL MARRAKECH
   mfccfiles_1  mfccfiles_2  mfccfiles_3  mfccfiles_4  mfccfiles_5  \
0     0.297583     1.225637    -0.367641     0.606499     0.072373   
1     0.637676    -1.507256    -1.572737    -0.954161    -0.857425   
2     2.236730    -0.319414     0.669910    -1.918119    -0.820882   
3     0.662077    -0.381499     0.111981    -1.743808    -1.317593   
4     0.736502     0.112932    -0.065024    -1.049458    -0.408043   

   mfccfiles_6  mfccfiles_7  mfccfiles_8  mfccfiles_9  mfccfiles_10  \
0    -2.029620     0.791469     0.752018     2.268802     -1.383289   
1     0.327005     0.816764     0.214245     0.241703      0.637066   
2    -2.379333    -1.570021    -2.755344    -2.150610     -2.528577   
3    -1.348534    -0.627198    -1.629882    -2.075974     -1.248765   
4    -0.437499     0.090831    -0.852983    -1.922491     -0.284365   

   mfccfiles_11  mfccfiles_12  mfccfiles_13  
0      0.548279      1.903211     -1.011470  
1      1.601538      0.300317     -0.466779  
2     -0.877081     -0.522248     -1.429911  
3     -1.126014     -1.316359     -1.126174  
4      0.210624     -0.032122     -0.700183  
Los parámetros del mejor modelo fueron {'activation': 'relu', 'hidden_layer_sizes': (30, 30), 'learning_rate_init': 0.02, 'max_iter': 200}, que permiten obtener un Accuracy de 74.71% y un Kappa del 41.29
Model: "model_13"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_14 (InputLayer)        (None, 13)                0         
_________________________________________________________________
dense_38 (Dense)             (None, 30)                420       
_________________________________________________________________
dense_39 (Dense)             (None, 30)                930       
_________________________________________________________________
dense_40 (Dense)             (None, 1)                 31        
=================================================================
Total params: 1,381
Trainable params: 1,381
Non-trainable params: 0
_________________________________________________________________
None
epochs: 200
115/115 [==============================] - 0s 52us/step
test loss: 1.730247563901155, test accuracy: 0.695652186870575
AUC ROC:  0.6560714285714285
Kappa:  0.25116279069767444
[[65 15]
 [20 15]]
## ARTE FRANCÉS y SPECIALIZED
   mfccfiles_1  mfccfiles_2  mfccfiles_3  mfccfiles_4  mfccfiles_5  \
0     0.297583     1.225637    -0.367641     0.606499     0.072373   
1     0.637676    -1.507256    -1.572737    -0.954161    -0.857425   
2     2.236730    -0.319414     0.669910    -1.918119    -0.820882   
3     0.662077    -0.381499     0.111981    -1.743808    -1.317593   
4     0.736502     0.112932    -0.065024    -1.049458    -0.408043   

   mfccfiles_6  mfccfiles_7  mfccfiles_8  mfccfiles_9  mfccfiles_10  \
0    -2.029620     0.791469     0.752018     2.268802     -1.383289   
1     0.327005     0.816764     0.214245     0.241703      0.637066   
2    -2.379333    -1.570021    -2.755344    -2.150610     -2.528577   
3    -1.348534    -0.627198    -1.629882    -2.075974     -1.248765   
4    -0.437499     0.090831    -0.852983    -1.922491     -0.284365   

   mfccfiles_11  mfccfiles_12  mfccfiles_13  
0      0.548279      1.903211     -1.011470  
1      1.601538      0.300317     -0.466779  
2     -0.877081     -0.522248     -1.429911  
3     -1.126014     -1.316359     -1.126174  
4      0.210624     -0.032122     -0.700183  
Los parámetros del mejor modelo fueron {'activation': 'tanh', 'hidden_layer_sizes': (30,), 'learning_rate_init': 0.006, 'max_iter': 2000}, que permiten obtener un Accuracy de 68.25% y un Kappa del 27.72
Model: "model_14"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_15 (InputLayer)        (None, 13)                0         
_________________________________________________________________
dense_41 (Dense)             (None, 30)                420       
_________________________________________________________________
dense_42 (Dense)             (None, 1)                 31        
=================================================================
Total params: 451
Trainable params: 451
Non-trainable params: 0
_________________________________________________________________
None
epochs: 2000
134/134 [==============================] - 0s 52us/step
test loss: 0.643175168713527, test accuracy: 0.6567164063453674
AUC ROC:  0.6184593023255814
Kappa:  0.16067538126361647
[[75 11]
 [35 13]]
## ARTE FRANCÉS y URBAN PLACE
   mfccfiles_1  mfccfiles_2  mfccfiles_3  mfccfiles_4  mfccfiles_5  \
0     0.297583     1.225637    -0.367641     0.606499     0.072373   
1     0.637676    -1.507256    -1.572737    -0.954161    -0.857425   
2     2.236730    -0.319414     0.669910    -1.918119    -0.820882   
3     0.662077    -0.381499     0.111981    -1.743808    -1.317593   
4     0.736502     0.112932    -0.065024    -1.049458    -0.408043   

   mfccfiles_6  mfccfiles_7  mfccfiles_8  mfccfiles_9  mfccfiles_10  \
0    -2.029620     0.791469     0.752018     2.268802     -1.383289   
1     0.327005     0.816764     0.214245     0.241703      0.637066   
2    -2.379333    -1.570021    -2.755344    -2.150610     -2.528577   
3    -1.348534    -0.627198    -1.629882    -2.075974     -1.248765   
4    -0.437499     0.090831    -0.852983    -1.922491     -0.284365   

   mfccfiles_11  mfccfiles_12  mfccfiles_13  
0      0.548279      1.903211     -1.011470  
1      1.601538      0.300317     -0.466779  
2     -0.877081     -0.522248     -1.429911  
3     -1.126014     -1.316359     -1.126174  
4      0.210624     -0.032122     -0.700183  
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-253-edca9a64a2bf> in <module>
     28     grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
     29 
---> 30     grid.fit(X_train, y_train)
     31 
     32     print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(

C:\ProgramData\Anaconda3\lib\site-packages\sklearn\model_selection\_search.py in fit(self, X, y, groups, **fit_params)
    638                                   error_score=self.error_score)
    639           for parameters, (train, test) in product(candidate_params,
--> 640                                                    cv.split(X, y, groups)))
    641 
    642         # if one choose to see train score, "out" will contain train score info

C:\ProgramData\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in __call__(self, iterable)
    787                 # consumption.
    788                 self._iterating = False
--> 789             self.retrieve()
    790             # Make sure that we get a last message telling us we are done
    791             elapsed_time = time.time() - self._start_time

C:\ProgramData\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in retrieve(self)
    697             try:
    698                 if getattr(self._backend, 'supports_timeout', False):
--> 699                     self._output.extend(job.get(timeout=self.timeout))
    700                 else:
    701                     self._output.extend(job.get())

C:\ProgramData\Anaconda3\lib\multiprocessing\pool.py in get(self, timeout)
    649 
    650     def get(self, timeout=None):
--> 651         self.wait(timeout)
    652         if not self.ready():
    653             raise TimeoutError

C:\ProgramData\Anaconda3\lib\multiprocessing\pool.py in wait(self, timeout)
    646 
    647     def wait(self, timeout=None):
--> 648         self._event.wait(timeout)
    649 
    650     def get(self, timeout=None):

C:\ProgramData\Anaconda3\lib\threading.py in wait(self, timeout)
    550             signaled = self._flag
    551             if not signaled:
--> 552                 signaled = self._cond.wait(timeout)
    553             return signaled
    554 

C:\ProgramData\Anaconda3\lib\threading.py in wait(self, timeout)
    294         try:    # restore state no matter what (e.g., KeyboardInterrupt)
    295             if timeout is None:
--> 296                 waiter.acquire()
    297                 gotit = True
    298             else:

KeyboardInterrupt: 
In [391]:
from IPython.display import display, Markdown, Latex

# Resume the pairwise-company experiment from combination index 4 (the earlier
# run was interrupted): grid-search a scikit-learn MLP, then rebuild the
# winning configuration as a Keras model and report test loss/accuracy,
# AUC ROC, Cohen's kappa and the confusion matrix.
two_combs = list(itertools.combinations(range(len(companies)), 2))
for comb in two_combs[4:]:
    display(Markdown('## '+companies[comb[0]]+' y '+companies[comb[1]]))
    # Stack the standardized MFCC features (X) and 'chosen' labels (y) of the
    # two companies in the pair.
    X = df_n_ps_std_mfcc[comb[0]].append(df_n_ps_std_mfcc[comb[1]])
    y = df_n_ps[comb[0]]['chosen'].append(df_n_ps[comb[1]]['chosen'])

    print(X.head())
    X_train, X_test, y_train, y_test = train_test_split(X, y)

    mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))

    # Hyper-parameter grid for the scikit-learn search.
    activation_vec = ['logistic', 'relu', 'tanh']
    max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
    hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10),
                              (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
    learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
    batch_size_vec = [10, 20, 40, 60, 80, 100, 150]  # currently excluded from the grid (see below)

    np.random.seed(1234)  # reproducible CV search
    parametros = {'activation': activation_vec,
                  'max_iter': max_iter_vec,
                  'hidden_layer_sizes': hidden_layer_sizes_vec,
                  'learning_rate_init': learning_rate_init_vec#,
                  #'batch_size': batch_size_vec
                  }
    # Track both kappa and accuracy; refit the best-accuracy model.
    scoring = {'kappa': make_scorer(cohen_kappa_score), 'accuracy': 'accuracy'}
    grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)

    grid.fit(X_train, y_train)

    print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
        grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))

    # --- Rebuild the winning architecture in Keras --------------------------
    n0 = X_train.shape[1]  # number of input features
    best = grid.best_params_
    # Hidden layer widths from the search, plus the single output neuron.
    ns = list(best['hidden_layer_sizes']) + [1]
    lr = best['learning_rate_init']
    epochs = best['max_iter']

    # scikit-learn names the logistic activation 'logistic'; Keras calls it
    # 'sigmoid'. Use a local variable rather than mutating best_params_.
    activation = 'sigmoid' if best['activation'] == 'logistic' else best['activation']

    input_tensor = Input(shape=(n0,))
    hidden_outputs = [input_tensor]
    # Chain the hidden Dense layers; the final (output) size is handled below.
    for i in range(len(ns)-1):
        hidden_outputs.append(Dense(ns[i], activation=activation)(hidden_outputs[i]))
    classification_output = Dense(ns[-1], activation='sigmoid')(hidden_outputs[-1])

    model = Model([input_tensor], [classification_output])

    print(model.summary())

    adam = keras.optimizers.Adam(lr=lr)
    model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
    # Halve the learning rate when validation accuracy plateaus.
    history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), verbose=0,
                callbacks=[
                    keras.callbacks.ReduceLROnPlateau(
                        monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=0
                    )
                ]
             )

    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']  # retained for optional learning-curve plots
    loss = history.history['loss']
    val_loss = history.history['val_loss']

    print("epochs: "+str(len(acc)))

    test_loss, test_acc = model.evaluate(X_test, y_test)
    print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))

    # AUC ROC uses the raw predicted probabilities.
    y_pred = model.predict(X_test)
    print("AUC ROC: ", roc_auc_score(y_test, y_pred))

    # Kappa and the confusion matrix need hard 0/1 labels (threshold 0.5).
    y_pred = list(map(lambda i: int(i >= 0.5), y_pred))
    print("Kappa: ", cohen_kappa_score(y_test, y_pred))

    print(confusion_matrix(y_test, y_pred))

Arte Francés y Urban Place

   mfccfiles_1  mfccfiles_2  mfccfiles_3  mfccfiles_4  mfccfiles_5  \
0     0.297583     1.225637    -0.367641     0.606499     0.072373   
1     0.637676    -1.507256    -1.572737    -0.954161    -0.857425   
2     2.236730    -0.319414     0.669910    -1.918119    -0.820882   
3     0.662077    -0.381499     0.111981    -1.743808    -1.317593   
4     0.736502     0.112932    -0.065024    -1.049458    -0.408043   

   mfccfiles_6  mfccfiles_7  mfccfiles_8  mfccfiles_9  mfccfiles_10  \
0    -2.029620     0.791469     0.752018     2.268802     -1.383289   
1     0.327005     0.816764     0.214245     0.241703      0.637066   
2    -2.379333    -1.570021    -2.755344    -2.150610     -2.528577   
3    -1.348534    -0.627198    -1.629882    -2.075974     -1.248765   
4    -0.437499     0.090831    -0.852983    -1.922491     -0.284365   

   mfccfiles_11  mfccfiles_12  mfccfiles_13  
0      0.548279      1.903211     -1.011470  
1      1.601538      0.300317     -0.466779  
2     -0.877081     -0.522248     -1.429911  
3     -1.126014     -1.316359     -1.126174  
4      0.210624     -0.032122     -0.700183  
Los parámetros del mejor modelo fueron {'activation': 'tanh', 'hidden_layer_sizes': (30,), 'learning_rate_init': 0.009, 'max_iter': 2000}, que permiten obtener un Accuracy de 74.12% y un Kappa del 33.40
Model: "model_20"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_22 (InputLayer)        (None, 13)                0         
_________________________________________________________________
dense_59 (Dense)             (None, 30)                420       
_________________________________________________________________
dense_60 (Dense)             (None, 1)                 31        
=================================================================
Total params: 451
Trainable params: 451
Non-trainable params: 0
_________________________________________________________________
None
epochs: 2000
133/133 [==============================] - 0s 308us/step
test loss: 0.7585942951360143, test accuracy: 0.7067669034004211
AUC ROC:  0.6449612403100775
Kappa:  0.2724084724365269
[[77 13]
 [26 17]]

Club De Banqueros y Empresarios y Gramma

   mfccfiles_1  mfccfiles_2  mfccfiles_3  mfccfiles_4  mfccfiles_5  \
0    -0.339415     0.847773     0.497198    -0.389310     1.225458   
1     0.587658    -1.195426     0.636375     0.199876     0.765321   
2     1.465595    -2.307943     0.354567    -0.058273    -1.298853   
3     0.749403    -1.690498    -0.125200    -1.016135     0.825845   
4    -0.280577     0.393332     0.744917     2.411400    -0.777421   

   mfccfiles_6  mfccfiles_7  mfccfiles_8  mfccfiles_9  mfccfiles_10  \
0     1.947033    -0.736267     0.492219     0.576682      1.504697   
1     0.061181     0.379367    -0.440867     0.232893      1.339920   
2    -0.811453    -1.551580    -3.934320    -1.079432      2.546130   
3     0.271444    -0.104786    -0.992141     0.049182      1.425948   
4    -0.420018     1.258355    -1.544565    -0.498071      0.421527   

   mfccfiles_11  mfccfiles_12  mfccfiles_13  
0     -1.796460      0.724954      0.958600  
1      0.110001      0.807525      0.815678  
2      1.421407      0.639359      0.199094  
3     -0.343269     -0.789558     -0.411898  
4     -0.632908     -0.056846     -0.072348  
Los parámetros del mejor modelo fueron {'activation': 'tanh', 'hidden_layer_sizes': (20, 10), 'learning_rate_init': 0.004, 'max_iter': 2000}, que permiten obtener un Accuracy de 78.47% y un Kappa del 42.89
Model: "model_21"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_23 (InputLayer)        (None, 13)                0         
_________________________________________________________________
dense_61 (Dense)             (None, 20)                280       
_________________________________________________________________
dense_62 (Dense)             (None, 10)                210       
_________________________________________________________________
dense_63 (Dense)             (None, 1)                 11        
=================================================================
Total params: 501
Trainable params: 501
Non-trainable params: 0
_________________________________________________________________
None
epochs: 2000
118/118 [==============================] - 0s 42us/step
test loss: 0.5979639458454261, test accuracy: 0.7203390002250671
AUC ROC:  0.7169750081142485
Kappa:  0.3194687172317372
[[68 11]
 [22 17]]

Club De Banqueros y Empresarios y Hotel Marrakech

   mfccfiles_1  mfccfiles_2  mfccfiles_3  mfccfiles_4  mfccfiles_5  \
0    -0.339415     0.847773     0.497198    -0.389310     1.225458   
1     0.587658    -1.195426     0.636375     0.199876     0.765321   
2     1.465595    -2.307943     0.354567    -0.058273    -1.298853   
3     0.749403    -1.690498    -0.125200    -1.016135     0.825845   
4    -0.280577     0.393332     0.744917     2.411400    -0.777421   

   mfccfiles_6  mfccfiles_7  mfccfiles_8  mfccfiles_9  mfccfiles_10  \
0     1.947033    -0.736267     0.492219     0.576682      1.504697   
1     0.061181     0.379367    -0.440867     0.232893      1.339920   
2    -0.811453    -1.551580    -3.934320    -1.079432      2.546130   
3     0.271444    -0.104786    -0.992141     0.049182      1.425948   
4    -0.420018     1.258355    -1.544565    -0.498071      0.421527   

   mfccfiles_11  mfccfiles_12  mfccfiles_13  
0     -1.796460      0.724954      0.958600  
1      0.110001      0.807525      0.815678  
2      1.421407      0.639359      0.199094  
3     -0.343269     -0.789558     -0.411898  
4     -0.632908     -0.056846     -0.072348  
Los parámetros del mejor modelo fueron {'activation': 'relu', 'hidden_layer_sizes': (20, 20), 'learning_rate_init': 0.01, 'max_iter': 75}, que permiten obtener un Accuracy de 76.92% y un Kappa del 48.20
Model: "model_22"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_24 (InputLayer)        (None, 13)                0         
_________________________________________________________________
dense_64 (Dense)             (None, 20)                280       
_________________________________________________________________
dense_65 (Dense)             (None, 20)                420       
_________________________________________________________________
dense_66 (Dense)             (None, 1)                 21        
=================================================================
Total params: 721
Trainable params: 721
Non-trainable params: 0
_________________________________________________________________
None
epochs: 75
100/100 [==============================] - 0s 50us/step
test loss: 0.8634896659851075, test accuracy: 0.7200000286102295
AUC ROC:  0.78
Kappa:  0.35779816513761475
[[54 16]
 [12 18]]

Club De Banqueros y Empresarios y Specialized

   mfccfiles_1  mfccfiles_2  mfccfiles_3  mfccfiles_4  mfccfiles_5  \
0    -0.339415     0.847773     0.497198    -0.389310     1.225458   
1     0.587658    -1.195426     0.636375     0.199876     0.765321   
2     1.465595    -2.307943     0.354567    -0.058273    -1.298853   
3     0.749403    -1.690498    -0.125200    -1.016135     0.825845   
4    -0.280577     0.393332     0.744917     2.411400    -0.777421   

   mfccfiles_6  mfccfiles_7  mfccfiles_8  mfccfiles_9  mfccfiles_10  \
0     1.947033    -0.736267     0.492219     0.576682      1.504697   
1     0.061181     0.379367    -0.440867     0.232893      1.339920   
2    -0.811453    -1.551580    -3.934320    -1.079432      2.546130   
3     0.271444    -0.104786    -0.992141     0.049182      1.425948   
4    -0.420018     1.258355    -1.544565    -0.498071      0.421527   

   mfccfiles_11  mfccfiles_12  mfccfiles_13  
0     -1.796460      0.724954      0.958600  
1      0.110001      0.807525      0.815678  
2      1.421407      0.639359      0.199094  
3     -0.343269     -0.789558     -0.411898  
4     -0.632908     -0.056846     -0.072348  
Los parámetros del mejor modelo fueron {'activation': 'relu', 'hidden_layer_sizes': (30, 30, 30), 'learning_rate_init': 0.002, 'max_iter': 100}, que permiten obtener un Accuracy de 74.37% y un Kappa del 45.55
Model: "model_23"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_25 (InputLayer)        (None, 13)                0         
_________________________________________________________________
dense_67 (Dense)             (None, 30)                420       
_________________________________________________________________
dense_68 (Dense)             (None, 30)                930       
_________________________________________________________________
dense_69 (Dense)             (None, 30)                930       
_________________________________________________________________
dense_70 (Dense)             (None, 1)                 31        
=================================================================
Total params: 2,311
Trainable params: 2,311
Non-trainable params: 0
_________________________________________________________________
None
epochs: 100
119/119 [==============================] - 0s 50us/step
test loss: 0.6695057942586786, test accuracy: 0.6974790096282959
AUC ROC:  0.7086538461538461
Kappa:  0.30431958428061057
[[63 17]
 [19 20]]

Club De Banqueros y Empresarios y Urban Place

   mfccfiles_1  mfccfiles_2  mfccfiles_3  mfccfiles_4  mfccfiles_5  \
0    -0.339415     0.847773     0.497198    -0.389310     1.225458   
1     0.587658    -1.195426     0.636375     0.199876     0.765321   
2     1.465595    -2.307943     0.354567    -0.058273    -1.298853   
3     0.749403    -1.690498    -0.125200    -1.016135     0.825845   
4    -0.280577     0.393332     0.744917     2.411400    -0.777421   

   mfccfiles_6  mfccfiles_7  mfccfiles_8  mfccfiles_9  mfccfiles_10  \
0     1.947033    -0.736267     0.492219     0.576682      1.504697   
1     0.061181     0.379367    -0.440867     0.232893      1.339920   
2    -0.811453    -1.551580    -3.934320    -1.079432      2.546130   
3     0.271444    -0.104786    -0.992141     0.049182      1.425948   
4    -0.420018     1.258355    -1.544565    -0.498071      0.421527   

   mfccfiles_11  mfccfiles_12  mfccfiles_13  
0     -1.796460      0.724954      0.958600  
1      0.110001      0.807525      0.815678  
2      1.421407      0.639359      0.199094  
3     -0.343269     -0.789558     -0.411898  
4     -0.632908     -0.056846     -0.072348  
Los parámetros del mejor modelo fueron {'activation': 'logistic', 'hidden_layer_sizes': (30, 30), 'learning_rate_init': 0.02, 'max_iter': 50}, que permiten obtener un Accuracy de 72.24% y un Kappa del 11.68
Model: "model_24"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_26 (InputLayer)        (None, 13)                0         
_________________________________________________________________
dense_71 (Dense)             (None, 30)                420       
_________________________________________________________________
dense_72 (Dense)             (None, 30)                930       
_________________________________________________________________
dense_73 (Dense)             (None, 1)                 31        
=================================================================
Total params: 1,381
Trainable params: 1,381
Non-trainable params: 0
_________________________________________________________________
None
epochs: 50
118/118 [==============================] - 0s 34us/step
test loss: 0.5854327123043901, test accuracy: 0.7033898234367371
AUC ROC:  0.6773026315789473
Kappa:  0.1924129839655847
[[74  6]
 [29  9]]

Gramma y Hotel Marrakech

   mfccfiles_1  mfccfiles_2  mfccfiles_3  mfccfiles_4  mfccfiles_5  \
0    -0.674917     0.169246     0.673543     1.157142    -0.633186   
1     0.277269     0.514176     0.200398     0.988939    -1.756594   
2     1.483921     0.724793     0.473099     0.439577    -0.358096   
3    -0.734008    -0.683844    -0.764866    -0.225060    -0.261235   
4    -0.834815    -0.735908    -1.177596    -0.093532     0.508050   

   mfccfiles_6  mfccfiles_7  mfccfiles_8  mfccfiles_9  mfccfiles_10  \
0     0.688145     0.215883    -0.452048     1.101066      0.064017   
1    -0.022788    -0.235704     0.523508    -0.604231      1.188209   
2    -0.452581    -0.213173    -0.596057    -0.767473      0.696227   
3    -0.243429     0.588768     0.874148     1.302526      0.091256   
4     0.503458     1.380798     1.847226     1.227896      0.017729   

   mfccfiles_11  mfccfiles_12  mfccfiles_13  
0     -0.153703      1.751289      0.812723  
1      0.863617     -0.801768      0.229305  
2     -0.111259     -0.370649     -1.325817  
3     -0.600323     -0.827452      0.390838  
4     -0.329325     -0.953249     -0.125917  
Los parámetros del mejor modelo fueron {'activation': 'relu', 'hidden_layer_sizes': (30, 20, 10), 'learning_rate_init': 0.009, 'max_iter': 300}, que permiten obtener un Accuracy de 73.33% y un Kappa del 36.99
Model: "model_25"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_27 (InputLayer)        (None, 13)                0         
_________________________________________________________________
dense_74 (Dense)             (None, 30)                420       
_________________________________________________________________
dense_75 (Dense)             (None, 20)                620       
_________________________________________________________________
dense_76 (Dense)             (None, 10)                210       
_________________________________________________________________
dense_77 (Dense)             (None, 1)                 11        
=================================================================
Total params: 1,261
Trainable params: 1,261
Non-trainable params: 0
_________________________________________________________________
None
epochs: 300
90/90 [==============================] - 0s 33us/step
test loss: 2.603563933240043, test accuracy: 0.6555555462837219
AUC ROC:  0.6552779194288629
Kappa:  0.2552055525894287
[[43 10]
 [21 16]]

Gramma y Specialized

   mfccfiles_1  mfccfiles_2  mfccfiles_3  mfccfiles_4  mfccfiles_5  \
0    -0.674917     0.169246     0.673543     1.157142    -0.633186   
1     0.277269     0.514176     0.200398     0.988939    -1.756594   
2     1.483921     0.724793     0.473099     0.439577    -0.358096   
3    -0.734008    -0.683844    -0.764866    -0.225060    -0.261235   
4    -0.834815    -0.735908    -1.177596    -0.093532     0.508050   

   mfccfiles_6  mfccfiles_7  mfccfiles_8  mfccfiles_9  mfccfiles_10  \
0     0.688145     0.215883    -0.452048     1.101066      0.064017   
1    -0.022788    -0.235704     0.523508    -0.604231      1.188209   
2    -0.452581    -0.213173    -0.596057    -0.767473      0.696227   
3    -0.243429     0.588768     0.874148     1.302526      0.091256   
4     0.503458     1.380798     1.847226     1.227896      0.017729   

   mfccfiles_11  mfccfiles_12  mfccfiles_13  
0     -0.153703      1.751289      0.812723  
1      0.863617     -0.801768      0.229305  
2     -0.111259     -0.370649     -1.325817  
3     -0.600323     -0.827452      0.390838  
4     -0.329325     -0.953249     -0.125917  
Los parámetros del mejor modelo fueron {'activation': 'tanh', 'hidden_layer_sizes': (30,), 'learning_rate_init': 0.005, 'max_iter': 1000}, que permiten obtener un Accuracy de 72.39% y un Kappa del 41.65
Model: "model_26"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_28 (InputLayer)        (None, 13)                0         
_________________________________________________________________
dense_78 (Dense)             (None, 30)                420       
_________________________________________________________________
dense_79 (Dense)             (None, 1)                 31        
=================================================================
Total params: 451
Trainable params: 451
Non-trainable params: 0
_________________________________________________________________
None
epochs: 1000
109/109 [==============================] - 0s 37us/step
test loss: 0.734237842603561, test accuracy: 0.5596330165863037
AUC ROC:  0.5234265734265734
Kappa:  0.02823179791976227
[[49 16]
 [32 12]]

Gramma y Urban Place

   mfccfiles_1  mfccfiles_2  mfccfiles_3  mfccfiles_4  mfccfiles_5  \
0    -0.674917     0.169246     0.673543     1.157142    -0.633186   
1     0.277269     0.514176     0.200398     0.988939    -1.756594   
2     1.483921     0.724793     0.473099     0.439577    -0.358096   
3    -0.734008    -0.683844    -0.764866    -0.225060    -0.261235   
4    -0.834815    -0.735908    -1.177596    -0.093532     0.508050   

   mfccfiles_6  mfccfiles_7  mfccfiles_8  mfccfiles_9  mfccfiles_10  \
0     0.688145     0.215883    -0.452048     1.101066      0.064017   
1    -0.022788    -0.235704     0.523508    -0.604231      1.188209   
2    -0.452581    -0.213173    -0.596057    -0.767473      0.696227   
3    -0.243429     0.588768     0.874148     1.302526      0.091256   
4     0.503458     1.380798     1.847226     1.227896      0.017729   

   mfccfiles_11  mfccfiles_12  mfccfiles_13  
0     -0.153703      1.751289      0.812723  
1      0.863617     -0.801768      0.229305  
2     -0.111259     -0.370649     -1.325817  
3     -0.600323     -0.827452      0.390838  
4     -0.329325     -0.953249     -0.125917  
Los parámetros del mejor modelo fueron {'activation': 'tanh', 'hidden_layer_sizes': (10, 10), 'learning_rate_init': 0.001, 'max_iter': 50}, que permiten obtener un Accuracy de 71.91% y un Kappa del 15.28
Model: "model_27"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_29 (InputLayer)        (None, 13)                0         
_________________________________________________________________
dense_80 (Dense)             (None, 10)                140       
_________________________________________________________________
dense_81 (Dense)             (None, 10)                110       
_________________________________________________________________
dense_82 (Dense)             (None, 1)                 11        
=================================================================
Total params: 261
Trainable params: 261
Non-trainable params: 0
_________________________________________________________________
None
epochs: 50
108/108 [==============================] - 0s 56us/step
test loss: 0.6653945070725901, test accuracy: 0.5925925970077515
AUC ROC:  0.5785714285714285
Kappa:  -0.08990825688073412
[[63  7]
 [37  1]]

Hotel Marrakech y Specialized

   mfccfiles_1  mfccfiles_2  mfccfiles_3  mfccfiles_4  mfccfiles_5  \
0     0.221235     1.617887     0.929874    -0.231486    -0.525862   
1     0.836735    -0.529605    -1.268139    -0.791053     0.815880   
2    -0.190995     1.202756     0.050028    -2.631154     3.701544   
3     0.521202     1.354284     1.423683    -0.634173     0.934734   
4     0.250234     1.586078    -1.791096     0.127156     1.573000   

   mfccfiles_6  mfccfiles_7  mfccfiles_8  mfccfiles_9  mfccfiles_10  \
0     1.384826     0.709441     0.512679    -2.231286     -2.278872   
1    -1.992230    -0.371430    -0.356669     1.323871      0.946394   
2    -1.158173     0.439586     2.317548    -2.282526     -1.571775   
3     0.214772    -0.349135     1.009101    -2.193012     -0.301254   
4     0.288525     1.962471     1.500627     1.352853     -1.921935   

   mfccfiles_11  mfccfiles_12  mfccfiles_13  
0     -0.728806     -2.187766     -1.206544  
1     -1.085097      0.673490     -1.496313  
2     -2.541951     -2.587380     -2.132445  
3     -0.356046     -0.668937     -0.421263  
4      0.705405     -0.230103     -0.803009  
Los parámetros del mejor modelo fueron {'activation': 'tanh', 'hidden_layer_sizes': (30, 20, 10), 'learning_rate_init': 0.009, 'max_iter': 75}, que permiten obtener un Accuracy de 67.65% y un Kappa del 34.61
Model: "model_28"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_30 (InputLayer)        (None, 13)                0         
_________________________________________________________________
dense_83 (Dense)             (None, 30)                420       
_________________________________________________________________
dense_84 (Dense)             (None, 20)                620       
_________________________________________________________________
dense_85 (Dense)             (None, 10)                210       
_________________________________________________________________
dense_86 (Dense)             (None, 1)                 11        
=================================================================
Total params: 1,261
Trainable params: 1,261
Non-trainable params: 0
_________________________________________________________________
None
epochs: 75
91/91 [==============================] - 0s 33us/step
test loss: 1.3281121267067206, test accuracy: 0.6373626589775085
AUC ROC:  0.6967455621301775
Kappa:  0.247557003257329
[[38 14]
 [19 20]]

Hotel Marrakech y Urban Place

   mfccfiles_1  mfccfiles_2  mfccfiles_3  mfccfiles_4  mfccfiles_5  \
0     0.221235     1.617887     0.929874    -0.231486    -0.525862   
1     0.836735    -0.529605    -1.268139    -0.791053     0.815880   
2    -0.190995     1.202756     0.050028    -2.631154     3.701544   
3     0.521202     1.354284     1.423683    -0.634173     0.934734   
4     0.250234     1.586078    -1.791096     0.127156     1.573000   

   mfccfiles_6  mfccfiles_7  mfccfiles_8  mfccfiles_9  mfccfiles_10  \
0     1.384826     0.709441     0.512679    -2.231286     -2.278872   
1    -1.992230    -0.371430    -0.356669     1.323871      0.946394   
2    -1.158173     0.439586     2.317548    -2.282526     -1.571775   
3     0.214772    -0.349135     1.009101    -2.193012     -0.301254   
4     0.288525     1.962471     1.500627     1.352853     -1.921935   

   mfccfiles_11  mfccfiles_12  mfccfiles_13  
0     -0.728806     -2.187766     -1.206544  
1     -1.085097      0.673490     -1.496313  
2     -2.541951     -2.587380     -2.132445  
3     -0.356046     -0.668937     -0.421263  
4      0.705405     -0.230103     -0.803009  
Los parámetros del mejor modelo fueron {'activation': 'relu', 'hidden_layer_sizes': (30, 30, 30), 'learning_rate_init': 0.003, 'max_iter': 2000}, que permiten obtener un Accuracy de 71.48% y un Kappa del 37.61
Model: "model_29"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_31 (InputLayer)        (None, 13)                0         
_________________________________________________________________
dense_87 (Dense)             (None, 30)                420       
_________________________________________________________________
dense_88 (Dense)             (None, 30)                930       
_________________________________________________________________
dense_89 (Dense)             (None, 30)                930       
_________________________________________________________________
dense_90 (Dense)             (None, 1)                 31        
=================================================================
Total params: 2,311
Trainable params: 2,311
Non-trainable params: 0
_________________________________________________________________
None
epochs: 2000
90/90 [==============================] - 0s 33us/step
test loss: 1.0477391746309068, test accuracy: 0.644444465637207
AUC ROC:  0.6894444444444443
Kappa:  0.1428571428571429
[[48 12]
 [20 10]]

Specialized y Urban Place

   mfccfiles_1  mfccfiles_2  mfccfiles_3  mfccfiles_4  mfccfiles_5  \
0     0.992062    -0.477172    -1.079451    -2.369470    -1.705431   
1     0.843575    -0.507672    -0.731713    -0.334904     1.442336   
2     0.816922    -0.263544     0.639646    -0.865417     1.276602   
3     4.368525     0.851784    -0.671158    -0.128467     2.141169   
4     0.001312     0.535305    -0.648296     0.221414     0.549478   

   mfccfiles_6  mfccfiles_7  mfccfiles_8  mfccfiles_9  mfccfiles_10  \
0    -0.098594    -0.281836    -1.432001    -0.898623      0.130446   
1    -0.491141    -0.266416    -0.511246     1.004414      0.558777   
2    -0.245238     0.106722    -0.761365    -0.170481     -1.443667   
3    -0.472725    -1.437233    -1.858760     1.581800     -0.145852   
4     0.736878    -0.439538    -0.138787     0.584258      0.095671   

   mfccfiles_11  mfccfiles_12  mfccfiles_13  
0     -0.024683     -0.312128      0.020392  
1      0.127114     -1.667555      0.835458  
2     -0.451102      1.196430     -0.037846  
3      0.107228      1.458238      1.666081  
4      1.901833      2.909252      1.802578  
Los parámetros del mejor modelo fueron {'activation': 'tanh', 'hidden_layer_sizes': (30, 30, 30), 'learning_rate_init': 0.01, 'max_iter': 50}, que permiten obtener un Accuracy de 68.10% y un Kappa del 33.67
Model: "model_30"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_32 (InputLayer)        (None, 13)                0         
_________________________________________________________________
dense_91 (Dense)             (None, 30)                420       
_________________________________________________________________
dense_92 (Dense)             (None, 30)                930       
_________________________________________________________________
dense_93 (Dense)             (None, 30)                930       
_________________________________________________________________
dense_94 (Dense)             (None, 1)                 31        
=================================================================
Total params: 2,311
Trainable params: 2,311
Non-trainable params: 0
_________________________________________________________________
None
epochs: 50
109/109 [==============================] - 0s 37us/step
test loss: 1.1675174198019396, test accuracy: 0.6880733966827393
AUC ROC:  0.743271221532091
Kappa:  0.3372675250357654
[[52 11]
 [23 23]]

Tonal Centroid

In [131]:
# List all feature columns to locate the tonal-centroid block by position.
df_n_ps_std[0].columns
Out[131]:
Index(['durationfiles', 'rmsfiles', 'rmsmedianfiles', 'lowenergyfiles',
       'ASRfiles', 'beatspectrumfiles', 'eventdensityfiles', 'tempofiles',
       'pulseclarityfiles', 'zerocrossfiles', 'rolloffsfiles',
       'brightnessfiles', 'spreadfiles', 'centroidfiles', 'kurtosisfiles',
       'flatnessfiles', 'entropyfiles', 'mfccfiles_1', 'mfccfiles_2',
       'mfccfiles_3', 'mfccfiles_4', 'mfccfiles_5', 'mfccfiles_6',
       'mfccfiles_7', 'mfccfiles_8', 'mfccfiles_9', 'mfccfiles_10',
       'mfccfiles_11', 'mfccfiles_12', 'mfccfiles_13', 'inharmonicityfiles',
       'bestkeyfiles', 'keyclarityfiles', 'modalityfiles',
       'tonalcentroidfiles_1', 'tonalcentroidfiles_2', 'tonalcentroidfiles_3',
       'tonalcentroidfiles_4', 'tonalcentroidfiles_5', 'tonalcentroidfiles_6',
       'chromagramfiles_1', 'chromagramfiles_2', 'chromagramfiles_3',
       'chromagramfiles_4', 'chromagramfiles_5', 'chromagramfiles_6',
       'chromagramfiles_7', 'chromagramfiles_8', 'chromagramfiles_9',
       'chromagramfiles_10', 'chromagramfiles_11', 'chromagramfiles_12',
       'attackslopefiles', 'attackleapfiles', 'chosen'],
      dtype='object')
In [132]:
# Verify that positions 34:40 cover exactly the six tonal-centroid features.
df_n_ps_std[0].columns[34:40]
Out[132]:
Index(['tonalcentroidfiles_1', 'tonalcentroidfiles_2', 'tonalcentroidfiles_3',
       'tonalcentroidfiles_4', 'tonalcentroidfiles_5', 'tonalcentroidfiles_6'],
      dtype='object')
In [133]:
# Build one DataFrame of tonal-centroid features (columns 34:40) per company,
# keeping the original column names from the standardized frames.
df_n_ps_std_tc = [
    pd.DataFrame(df_n_ps_std[i].iloc[:, 34:40], columns=df_n_ps_std[i].columns[34:40])
    for i in range(len(companies))
]
df_n_ps_std_tc[0].info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 315 entries, 0 to 314
Data columns (total 6 columns):
tonalcentroidfiles_1    315 non-null float64
tonalcentroidfiles_2    315 non-null float64
tonalcentroidfiles_3    315 non-null float64
tonalcentroidfiles_4    315 non-null float64
tonalcentroidfiles_5    315 non-null float64
tonalcentroidfiles_6    315 non-null float64
dtypes: float64(6)
memory usage: 14.8 KB

Arte Francés

ANN

In [134]:
# Features: tonal-centroid subset for the first company (Arte Francés).
X = df_n_ps_std_tc[0]
In [135]:
# Target: binary 'chosen' label for the same company's tracks.
y = df_n_ps[0]['chosen']
In [136]:
# Hold out a test set. Fix the seed (matching the 1234 used for NumPy below)
# and stratify on the label so the split — and every metric computed from it —
# is reproducible and class-balanced across reruns.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1234, stratify=y)
In [137]:
# Sanity-check the training set: (n_samples, 6 tonal-centroid features).
X_train.shape
Out[137]:
(236, 6)
In [138]:
# Base estimator for the grid search (hidden_layer_sizes is overridden by the
# grid). Fix random_state so weight initialization — and therefore the CV
# scores of the 20+ minute search — is reproducible; np.random.seed alone
# does not seed scikit-learn estimators.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30), random_state=1234)
In [139]:
# Candidate values for the hyperparameter grid search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
# One, two and three hidden layers with uniform widths of 10/20/30 units,
# plus two decreasing "pyramid" layouts.
hidden_layer_sizes_vec = (
    [(w,) for w in (10, 20, 30)]
    + [(w, w) for w in (10, 20, 30)]
    + [(20, 10)]
    + [(w, w, w) for w in (10, 20, 30)]
    + [(30, 20, 10)]
)
# Initial learning rates: 0.001 .. 0.01 in steps of 0.001, plus 0.02.
learning_rate_init_vec = [i / 1000 for i in range(1, 11)] + [0.02]
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [140]:
import time
start = time.time()  # Wall-clock start, used to report total tuning time below.

# Seed NumPy for reproducibility. NOTE(review): this does not seed the
# MLPClassifier itself (it takes a `random_state` argument) — confirm the
# estimator is seeded where it is constructed.
np.random.seed(1234)
# Search space; batch_size is deliberately left out to keep the grid tractable.
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track both Cohen's kappa and accuracy per fold; refit the winner on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# Fix: `iid=True` was deprecated in scikit-learn 0.22 and removed in 0.24,
# where passing it raises TypeError. Dropping it uses the plain (unweighted)
# mean over folds and keeps the call working on current versions.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1)
In [226]:
# Run the exhaustive search, then report the winning configuration and the
# total wall-clock time the tuning took.
grid.fit(X_train, y_train)

best_accuracy = grid.best_score_ * 100
best_kappa = grid.cv_results_['mean_test_kappa'][grid.best_index_] * 100
print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, best_accuracy, best_kappa))
end = time.time()  # Wall-clock end, captured right after the search finishes.
print("Tiempo total: {0:.2f} minutos".format((end - start) / 60))
Los parámetros del mejor modelo fueron {'activation': 'tanh', 'hidden_layer_sizes': (10, 10, 10), 'learning_rate_init': 0.001, 'max_iter': 100}, que permiten obtener un Accuracy de 76.27% y un Kappa del 13.18
Tiempo total: 23.63 minutos
In [141]:
# Pin the winning configuration from the (20+ minute) grid search so the
# notebook can be re-run from here without repeating the fit.
grid.best_params_ = {'activation': 'tanh', 'hidden_layer_sizes': (10, 10, 10), 'learning_rate_init': 0.001, 'max_iter': 100}

n0 = X_train.shape[1]  # input width = number of features
# Layer widths: the tuned hidden sizes followed by a single output unit.
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)
lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [142]:
# Keras functional-API input matching the feature count.
input_tensor = Input(shape = (n0,))
In [143]:
# Stack the tuned hidden layers on top of the input tensor, keeping every
# intermediate tensor so the last one can feed the output layer.
hidden_outputs = [input_tensor]
for width in ns[:-1]:
    hidden_outputs.append(Dense(width, activation=grid.best_params_['activation'])(hidden_outputs[-1]))

# Single sigmoid unit for the binary 'chosen' label.
classification_output = Dense(ns[-1], activation='sigmoid')(hidden_outputs[-1])
In [144]:
# Assemble the model and snapshot its freshly initialized weights so the
# same starting point can be restored before training.
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [145]:
# Confirm the architecture matches the tuned hidden_layer_sizes.
model.summary()
Model: "model_7"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_8 (InputLayer)         (None, 6)                 0         
_________________________________________________________________
dense_20 (Dense)             (None, 10)                70        
_________________________________________________________________
dense_21 (Dense)             (None, 10)                110       
_________________________________________________________________
dense_22 (Dense)             (None, 10)                110       
_________________________________________________________________
dense_23 (Dense)             (None, 1)                 11        
=================================================================
Total params: 301
Trainable params: 301
Non-trainable params: 0
_________________________________________________________________
In [146]:
# Restore the initial weights so repeated runs of this cell train from the
# same starting point, then train with the tuned learning rate and epoch count.
model.set_weights(weights)
# Fix: `lr` is a deprecated alias in Keras optimizers; `learning_rate` is the
# supported keyword (available since Keras 2.3 / TF 2.x, which this notebook
# is on given the 'val_accuracy' history keys).
adam = keras.optimizers.Adam(learning_rate=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Halve the learning rate whenever validation accuracy plateaus for 10 epochs.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test),
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 236 samples, validate on 79 samples
Epoch 1/100
236/236 [==============================] - 0s 970us/step - loss: 0.7070 - accuracy: 0.4915 - val_loss: 0.7205 - val_accuracy: 0.4430
Epoch 2/100
236/236 [==============================] - 0s 80us/step - loss: 0.6903 - accuracy: 0.5127 - val_loss: 0.7068 - val_accuracy: 0.4810
Epoch 3/100
236/236 [==============================] - 0s 68us/step - loss: 0.6757 - accuracy: 0.5508 - val_loss: 0.6955 - val_accuracy: 0.4810
Epoch 4/100
236/236 [==============================] - 0s 97us/step - loss: 0.6638 - accuracy: 0.5975 - val_loss: 0.6854 - val_accuracy: 0.5570
Epoch 5/100
236/236 [==============================] - 0s 72us/step - loss: 0.6517 - accuracy: 0.6271 - val_loss: 0.6767 - val_accuracy: 0.6076
Epoch 6/100
236/236 [==============================] - 0s 68us/step - loss: 0.6413 - accuracy: 0.6737 - val_loss: 0.6680 - val_accuracy: 0.6456
Epoch 7/100
236/236 [==============================] - 0s 68us/step - loss: 0.6329 - accuracy: 0.7246 - val_loss: 0.6604 - val_accuracy: 0.6962
Epoch 8/100
236/236 [==============================] - 0s 72us/step - loss: 0.6245 - accuracy: 0.7246 - val_loss: 0.6541 - val_accuracy: 0.7089
Epoch 9/100
236/236 [==============================] - 0s 110us/step - loss: 0.6168 - accuracy: 0.7288 - val_loss: 0.6486 - val_accuracy: 0.7089
Epoch 10/100
236/236 [==============================] - 0s 93us/step - loss: 0.6098 - accuracy: 0.7288 - val_loss: 0.6442 - val_accuracy: 0.7089
Epoch 11/100
236/236 [==============================] - 0s 89us/step - loss: 0.6043 - accuracy: 0.7288 - val_loss: 0.6406 - val_accuracy: 0.7089
Epoch 12/100
236/236 [==============================] - 0s 89us/step - loss: 0.5992 - accuracy: 0.7288 - val_loss: 0.6369 - val_accuracy: 0.7089
Epoch 13/100
236/236 [==============================] - 0s 97us/step - loss: 0.5948 - accuracy: 0.7288 - val_loss: 0.6344 - val_accuracy: 0.7089
Epoch 14/100
236/236 [==============================] - 0s 85us/step - loss: 0.5915 - accuracy: 0.7288 - val_loss: 0.6306 - val_accuracy: 0.7089
Epoch 15/100
236/236 [==============================] - 0s 102us/step - loss: 0.5877 - accuracy: 0.7288 - val_loss: 0.6292 - val_accuracy: 0.7089
Epoch 16/100
236/236 [==============================] - 0s 93us/step - loss: 0.5862 - accuracy: 0.7288 - val_loss: 0.6263 - val_accuracy: 0.7089
Epoch 17/100
236/236 [==============================] - 0s 106us/step - loss: 0.5835 - accuracy: 0.7288 - val_loss: 0.6253 - val_accuracy: 0.7089
Epoch 18/100
236/236 [==============================] - 0s 97us/step - loss: 0.5818 - accuracy: 0.7288 - val_loss: 0.6246 - val_accuracy: 0.7089

Epoch 00018: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.
Epoch 19/100
236/236 [==============================] - 0s 93us/step - loss: 0.5804 - accuracy: 0.7288 - val_loss: 0.6240 - val_accuracy: 0.7089
Epoch 20/100
236/236 [==============================] - 0s 97us/step - loss: 0.5800 - accuracy: 0.7288 - val_loss: 0.6227 - val_accuracy: 0.7089
Epoch 21/100
236/236 [==============================] - 0s 89us/step - loss: 0.5795 - accuracy: 0.7288 - val_loss: 0.6221 - val_accuracy: 0.7089
Epoch 22/100
236/236 [==============================] - 0s 89us/step - loss: 0.5789 - accuracy: 0.7288 - val_loss: 0.6217 - val_accuracy: 0.7089
Epoch 23/100
236/236 [==============================] - 0s 93us/step - loss: 0.5784 - accuracy: 0.7288 - val_loss: 0.6214 - val_accuracy: 0.7089
Epoch 24/100
236/236 [==============================] - 0s 93us/step - loss: 0.5778 - accuracy: 0.7288 - val_loss: 0.6212 - val_accuracy: 0.7089
Epoch 25/100
236/236 [==============================] - 0s 93us/step - loss: 0.5776 - accuracy: 0.7288 - val_loss: 0.6207 - val_accuracy: 0.7089
Epoch 26/100
236/236 [==============================] - 0s 85us/step - loss: 0.5771 - accuracy: 0.7288 - val_loss: 0.6205 - val_accuracy: 0.7089
Epoch 27/100
236/236 [==============================] - 0s 97us/step - loss: 0.5768 - accuracy: 0.7288 - val_loss: 0.6204 - val_accuracy: 0.7089
Epoch 28/100
236/236 [==============================] - 0s 89us/step - loss: 0.5765 - accuracy: 0.7288 - val_loss: 0.6193 - val_accuracy: 0.7089

Epoch 00028: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
Epoch 29/100
236/236 [==============================] - 0s 93us/step - loss: 0.5762 - accuracy: 0.7288 - val_loss: 0.6190 - val_accuracy: 0.7089
Epoch 30/100
236/236 [==============================] - 0s 93us/step - loss: 0.5759 - accuracy: 0.7288 - val_loss: 0.6188 - val_accuracy: 0.7089
Epoch 31/100
236/236 [==============================] - 0s 89us/step - loss: 0.5757 - accuracy: 0.7288 - val_loss: 0.6186 - val_accuracy: 0.7089
Epoch 32/100
236/236 [==============================] - 0s 93us/step - loss: 0.5755 - accuracy: 0.7288 - val_loss: 0.6184 - val_accuracy: 0.7089
Epoch 33/100
236/236 [==============================] - 0s 102us/step - loss: 0.5753 - accuracy: 0.7288 - val_loss: 0.6182 - val_accuracy: 0.7089
Epoch 34/100
236/236 [==============================] - 0s 119us/step - loss: 0.5751 - accuracy: 0.7288 - val_loss: 0.6180 - val_accuracy: 0.7089
Epoch 35/100
236/236 [==============================] - 0s 102us/step - loss: 0.5750 - accuracy: 0.7288 - val_loss: 0.6178 - val_accuracy: 0.7089
Epoch 36/100
236/236 [==============================] - 0s 102us/step - loss: 0.5749 - accuracy: 0.7288 - val_loss: 0.6175 - val_accuracy: 0.7089
Epoch 37/100
236/236 [==============================] - 0s 93us/step - loss: 0.5746 - accuracy: 0.7288 - val_loss: 0.6176 - val_accuracy: 0.7089
Epoch 38/100
236/236 [==============================] - 0s 93us/step - loss: 0.5744 - accuracy: 0.7288 - val_loss: 0.6175 - val_accuracy: 0.7089

Epoch 00038: ReduceLROnPlateau reducing learning rate to 0.0001250000059371814.
Epoch 39/100
236/236 [==============================] - 0s 102us/step - loss: 0.5743 - accuracy: 0.7288 - val_loss: 0.6175 - val_accuracy: 0.7089
Epoch 40/100
236/236 [==============================] - 0s 93us/step - loss: 0.5742 - accuracy: 0.7288 - val_loss: 0.6175 - val_accuracy: 0.7089
Epoch 41/100
236/236 [==============================] - 0s 97us/step - loss: 0.5742 - accuracy: 0.7288 - val_loss: 0.6173 - val_accuracy: 0.7089
Epoch 42/100
236/236 [==============================] - 0s 93us/step - loss: 0.5740 - accuracy: 0.7288 - val_loss: 0.6173 - val_accuracy: 0.7089
Epoch 43/100
236/236 [==============================] - 0s 114us/step - loss: 0.5740 - accuracy: 0.7288 - val_loss: 0.6172 - val_accuracy: 0.7089
Epoch 44/100
236/236 [==============================] - 0s 97us/step - loss: 0.5739 - accuracy: 0.7288 - val_loss: 0.6171 - val_accuracy: 0.7089
Epoch 45/100
236/236 [==============================] - 0s 93us/step - loss: 0.5738 - accuracy: 0.7288 - val_loss: 0.6171 - val_accuracy: 0.7089
Epoch 46/100
236/236 [==============================] - 0s 97us/step - loss: 0.5738 - accuracy: 0.7288 - val_loss: 0.6170 - val_accuracy: 0.7089
Epoch 47/100
236/236 [==============================] - 0s 93us/step - loss: 0.5737 - accuracy: 0.7288 - val_loss: 0.6171 - val_accuracy: 0.7089
Epoch 48/100
236/236 [==============================] - 0s 123us/step - loss: 0.5735 - accuracy: 0.7288 - val_loss: 0.6170 - val_accuracy: 0.7089

Epoch 00048: ReduceLROnPlateau reducing learning rate to 6.25000029685907e-05.
Epoch 49/100
236/236 [==============================] - 0s 93us/step - loss: 0.5735 - accuracy: 0.7288 - val_loss: 0.6170 - val_accuracy: 0.7089
Epoch 50/100
236/236 [==============================] - 0s 93us/step - loss: 0.5734 - accuracy: 0.7288 - val_loss: 0.6170 - val_accuracy: 0.7089
Epoch 51/100
236/236 [==============================] - 0s 89us/step - loss: 0.5734 - accuracy: 0.7288 - val_loss: 0.6170 - val_accuracy: 0.7089
Epoch 52/100
236/236 [==============================] - 0s 89us/step - loss: 0.5733 - accuracy: 0.7288 - val_loss: 0.6169 - val_accuracy: 0.7089
Epoch 53/100
236/236 [==============================] - 0s 89us/step - loss: 0.5733 - accuracy: 0.7288 - val_loss: 0.6169 - val_accuracy: 0.7089
Epoch 54/100
236/236 [==============================] - 0s 97us/step - loss: 0.5733 - accuracy: 0.7288 - val_loss: 0.6169 - val_accuracy: 0.7089
Epoch 55/100
236/236 [==============================] - 0s 89us/step - loss: 0.5732 - accuracy: 0.7288 - val_loss: 0.6169 - val_accuracy: 0.7089
Epoch 56/100
236/236 [==============================] - 0s 102us/step - loss: 0.5732 - accuracy: 0.7288 - val_loss: 0.6168 - val_accuracy: 0.7089
Epoch 57/100
236/236 [==============================] - 0s 89us/step - loss: 0.5731 - accuracy: 0.7288 - val_loss: 0.6168 - val_accuracy: 0.7089
Epoch 58/100
236/236 [==============================] - 0s 97us/step - loss: 0.5731 - accuracy: 0.7288 - val_loss: 0.6168 - val_accuracy: 0.7089

Epoch 00058: ReduceLROnPlateau reducing learning rate to 3.125000148429535e-05.
Epoch 59/100
236/236 [==============================] - 0s 89us/step - loss: 0.5731 - accuracy: 0.7288 - val_loss: 0.6168 - val_accuracy: 0.7089
Epoch 60/100
236/236 [==============================] - 0s 97us/step - loss: 0.5730 - accuracy: 0.7288 - val_loss: 0.6168 - val_accuracy: 0.7089
Epoch 61/100
236/236 [==============================] - 0s 89us/step - loss: 0.5730 - accuracy: 0.7288 - val_loss: 0.6168 - val_accuracy: 0.7089
Epoch 62/100
236/236 [==============================] - 0s 93us/step - loss: 0.5730 - accuracy: 0.7288 - val_loss: 0.6168 - val_accuracy: 0.7089
Epoch 63/100
236/236 [==============================] - 0s 110us/step - loss: 0.5730 - accuracy: 0.7288 - val_loss: 0.6168 - val_accuracy: 0.7089
Epoch 64/100
236/236 [==============================] - 0s 131us/step - loss: 0.5730 - accuracy: 0.7288 - val_loss: 0.6168 - val_accuracy: 0.7089
Epoch 65/100
236/236 [==============================] - 0s 102us/step - loss: 0.5729 - accuracy: 0.7288 - val_loss: 0.6168 - val_accuracy: 0.7089
Epoch 66/100
236/236 [==============================] - 0s 102us/step - loss: 0.5729 - accuracy: 0.7288 - val_loss: 0.6168 - val_accuracy: 0.7089
Epoch 67/100
236/236 [==============================] - 0s 110us/step - loss: 0.5729 - accuracy: 0.7288 - val_loss: 0.6168 - val_accuracy: 0.7089
Epoch 68/100
236/236 [==============================] - 0s 102us/step - loss: 0.5729 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089

Epoch 00068: ReduceLROnPlateau reducing learning rate to 1.5625000742147677e-05.
Epoch 69/100
236/236 [==============================] - 0s 106us/step - loss: 0.5729 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
Epoch 70/100
236/236 [==============================] - 0s 97us/step - loss: 0.5729 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
Epoch 71/100
236/236 [==============================] - 0s 97us/step - loss: 0.5728 - accuracy: 0.7288 - val_loss: 0.6168 - val_accuracy: 0.7089
Epoch 72/100
236/236 [==============================] - 0s 102us/step - loss: 0.5728 - accuracy: 0.7288 - val_loss: 0.6168 - val_accuracy: 0.7089
Epoch 73/100
236/236 [==============================] - 0s 97us/step - loss: 0.5728 - accuracy: 0.7288 - val_loss: 0.6168 - val_accuracy: 0.7089
Epoch 74/100
236/236 [==============================] - 0s 97us/step - loss: 0.5728 - accuracy: 0.7288 - val_loss: 0.6168 - val_accuracy: 0.7089
Epoch 75/100
236/236 [==============================] - 0s 110us/step - loss: 0.5728 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
Epoch 76/100
236/236 [==============================] - 0s 97us/step - loss: 0.5728 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
Epoch 77/100
236/236 [==============================] - 0s 102us/step - loss: 0.5728 - accuracy: 0.7288 - val_loss: 0.6168 - val_accuracy: 0.7089
Epoch 78/100
236/236 [==============================] - 0s 114us/step - loss: 0.5728 - accuracy: 0.7288 - val_loss: 0.6168 - val_accuracy: 0.7089

Epoch 00078: ReduceLROnPlateau reducing learning rate to 7.812500371073838e-06.
Epoch 79/100
236/236 [==============================] - 0s 93us/step - loss: 0.5728 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
Epoch 80/100
236/236 [==============================] - 0s 119us/step - loss: 0.5728 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
Epoch 81/100
236/236 [==============================] - 0s 119us/step - loss: 0.5727 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
Epoch 82/100
236/236 [==============================] - 0s 110us/step - loss: 0.5727 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
Epoch 83/100
236/236 [==============================] - 0s 93us/step - loss: 0.5727 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
Epoch 84/100
236/236 [==============================] - 0s 102us/step - loss: 0.5727 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
Epoch 85/100
236/236 [==============================] - 0s 123us/step - loss: 0.5727 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
Epoch 86/100
236/236 [==============================] - 0s 102us/step - loss: 0.5727 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
Epoch 87/100
236/236 [==============================] - 0s 102us/step - loss: 0.5727 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
Epoch 88/100
236/236 [==============================] - 0s 106us/step - loss: 0.5727 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089

Epoch 00088: ReduceLROnPlateau reducing learning rate to 3.906250185536919e-06.
Epoch 89/100
236/236 [==============================] - 0s 114us/step - loss: 0.5727 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
Epoch 90/100
236/236 [==============================] - 0s 110us/step - loss: 0.5727 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
Epoch 91/100
236/236 [==============================] - 0s 102us/step - loss: 0.5727 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
Epoch 92/100
236/236 [==============================] - 0s 110us/step - loss: 0.5727 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
Epoch 93/100
236/236 [==============================] - 0s 106us/step - loss: 0.5727 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
Epoch 94/100
236/236 [==============================] - 0s 106us/step - loss: 0.5727 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
Epoch 95/100
236/236 [==============================] - 0s 106us/step - loss: 0.5727 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
Epoch 96/100
236/236 [==============================] - 0s 114us/step - loss: 0.5727 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
Epoch 97/100
236/236 [==============================] - 0s 131us/step - loss: 0.5727 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
Epoch 98/100
236/236 [==============================] - 0s 110us/step - loss: 0.5727 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089

Epoch 00098: ReduceLROnPlateau reducing learning rate to 1.9531250927684596e-06.
Epoch 99/100
236/236 [==============================] - 0s 110us/step - loss: 0.5727 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
Epoch 100/100
236/236 [==============================] - 0s 106us/step - loss: 0.5727 - accuracy: 0.7288 - val_loss: 0.6167 - val_accuracy: 0.7089
In [147]:
# Plot training-history curves (accuracy and loss) for the fitted Keras model.
# Fixes: removed leftover debug `print(epochs)`; added axis labels so the
# figures stand alone when the notebook is skimmed.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

# Accuracy: training (dots) vs. validation (line)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

# Loss: training (dots) vs. validation (line)
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
range(0, 100)
In [148]:
# Final held-out evaluation of the trained network.
test_loss, test_acc = model.evaluate(X_test, y_test)
print(f"test loss: {test_loss}, test accuracy: {test_acc}")
79/79 [==============================] - 0s 63us/step
test loss: 0.6167174675796605, test accuracy: 0.7088607549667358
In [149]:
# Turn predicted probabilities into hard 0/1 labels with a 0.5 threshold,
# then report agreement metrics on the held-out set.
# Improvement: vectorized numpy threshold instead of a Python map/lambda
# over the prediction array.
y_pred = model.predict(X_test)
y_pred = (np.asarray(y_pred).ravel() >= 0.5).astype(int)
# NOTE: Kappa 0 / AUC 0.5 with a one-column confusion matrix (see output)
# means the model is predicting only the majority class here.
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
Kappa:  0.0
AUC ROC:  0.5
[[56  0]
 [23  0]]

KMeans

In [236]:
X
Out[236]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6
0 1.585484 0.923309 -0.748807 1.209820 -1.073924 0.283035
1 1.129768 0.963814 0.209096 -0.143471 0.115184 0.020905
2 -0.066076 1.857866 1.921193 1.093619 2.089353 1.984310
3 0.119831 1.429286 1.472808 1.282029 1.096897 2.122871
4 -0.123292 0.197415 0.503797 1.431215 1.715761 0.683611
5 -0.553148 0.127288 2.842797 -1.267536 1.273635 2.271003
6 -0.391161 -0.277979 1.117190 0.713713 -1.700239 -0.587212
7 -0.538913 1.251767 0.804764 0.214490 -0.879193 0.306379
8 -0.032875 0.470932 -0.097587 0.648742 -1.031819 0.164361
9 0.212612 0.417050 0.248906 -1.495698 1.707020 -0.592557
10 0.503861 0.931556 -0.151041 -0.292255 -0.447486 -0.196969
11 0.279193 0.515238 0.967780 0.662445 0.209303 0.507757
12 0.049966 -0.352640 -0.595831 0.468652 -1.490626 0.103993
13 0.224941 -0.251218 0.462542 0.909846 -1.326305 -0.026245
14 -1.007562 -0.018176 -1.310793 -1.236878 1.573941 -1.709828
15 -0.156222 0.809961 0.891274 -0.560523 1.892119 -0.767684
16 -0.561577 0.649671 0.616195 -1.560389 1.268177 -1.094196
17 0.828743 -1.472369 0.354955 0.144804 -1.811789 -0.965157
18 0.520383 -1.982735 -0.524125 -0.227001 -1.732849 -1.131893
19 0.619988 -1.486445 0.400141 -0.931997 -2.408325 -0.039112
20 -1.718572 1.328228 -0.790084 -2.422930 -1.075452 -0.362318
21 -1.673788 -0.008250 -0.699254 -0.669847 0.358439 -1.911212
22 -1.660892 0.201931 -1.149618 -0.154362 -1.229344 -1.403777
23 0.467761 1.771253 -2.115824 -2.524369 -0.191251 0.283914
24 1.365177 1.009311 0.060605 -0.430112 -1.445585 0.147080
25 0.526073 -0.108881 -0.890367 0.453905 -1.553338 1.635917
26 0.396699 1.270857 -0.933936 -0.265038 -0.598090 1.282414
27 1.042904 1.615651 -1.342135 -1.108659 -0.228062 0.704441
28 0.488482 1.667605 -0.222726 -1.289806 0.833486 -0.007520
29 -0.206765 -0.088250 0.214115 0.280450 -0.032652 1.281632
... ... ... ... ... ... ...
285 0.761670 -1.885823 0.301159 -0.516737 -0.384864 -0.542683
286 2.233841 -0.021303 1.621452 -1.116993 0.705855 -1.623585
287 0.933521 0.065790 -1.295122 0.574358 -0.278402 2.277615
288 1.398839 -0.456314 -1.182173 0.348139 0.231267 1.399962
289 0.982720 0.097900 -0.814050 0.852544 0.308591 1.608777
290 0.959009 -1.443293 -0.329974 -0.253115 0.724219 -0.415649
291 -3.186710 0.207715 -1.442295 -0.713479 -0.644843 0.684665
292 -2.266002 0.208427 0.090970 0.014667 -0.927106 1.146918
293 1.473030 0.944250 -0.160216 0.323871 -0.664953 0.727193
294 2.116511 1.003706 -1.374891 -0.601957 -1.760610 0.014541
295 1.516890 0.883674 -1.850520 0.688076 -1.350999 0.620360
296 -0.171687 0.469515 0.407395 1.081823 -1.053878 -0.023872
297 -0.023957 0.051075 0.045786 0.108234 -0.643408 0.527902
298 0.152215 0.030843 0.217573 0.063538 -0.471831 0.840207
299 -0.286639 0.215830 -0.245963 0.927776 -0.474599 0.233343
300 0.411970 0.642559 -0.319323 1.141506 -0.291830 0.197165
301 0.915776 0.420715 -0.435877 0.621147 -0.746445 0.397040
302 -0.963660 -2.504276 0.149799 1.260867 0.289108 -0.386784
303 -0.732467 -1.137228 -0.806385 1.023830 -0.646676 -0.876828
304 0.175270 -2.037232 -1.136866 -0.405235 -0.294667 0.533651
305 1.507133 -2.022655 -0.176808 -2.220532 -0.028497 1.571387
306 3.112152 -1.221959 0.020285 -2.830754 -0.654301 0.750704
307 2.013689 -1.475971 -1.690376 -2.773483 -0.813379 2.885407
308 1.511606 0.675622 -0.743850 -1.491272 -0.807844 -0.418377
309 1.412105 1.763606 -0.378812 -1.313665 -0.185153 -0.847850
310 1.122395 0.814854 -0.854500 -1.402116 0.114392 -0.882142
311 0.336108 0.216775 0.080290 -0.047519 -0.756037 -1.071728
312 1.201358 0.783381 -0.221010 0.308862 -1.206838 -0.690837
313 -0.672924 -0.268212 1.143994 -0.147440 2.008975 -0.652644
314 -0.034368 1.050289 0.424944 0.805168 -0.376947 -0.658873

315 rows × 6 columns

In [237]:
# Within-cluster sum of squares (inertia) for K = 1..14 — data for the
# elbow method used to choose the number of clusters.
WSSs = [KMeans(n_clusters=k, random_state=0).fit(X).inertia_ for k in range(1, 15)]
WSSs
Out[237]:
[1890.0,
 1580.620167230212,
 1363.1774930694505,
 1195.4345875172114,
 1096.6320586530273,
 1013.9501053083941,
 953.1588875587528,
 882.9139429264524,
 844.4886425171022,
 815.6880457942883,
 788.9746059037274,
 745.3922927967658,
 722.2827052057237,
 709.9626886481943]
In [238]:
# Elbow plot: look for the K after which the inertia decreases only marginally.
# Fixes: added title/axis labels and plt.show() to suppress the Line2D repr.
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)
plt.title('Elbow curve for KMeans')
plt.xlabel('Number of clusters K')
plt.ylabel('Within-cluster sum of squares (inertia)')
plt.show()
Out[238]:
[<matplotlib.lines.Line2D at 0x1b829b146a0>]

Based on the elbow plot above, we choose K = 4

In [239]:
# Fit K-Means with the K chosen from the elbow plot (K = 4); fit() returns
# the estimator itself, so the last expression displays the fitted model.
kmeans_tc = KMeans(n_clusters=4, random_state=0, n_init=10).fit(X)
kmeans_tc
Out[239]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=4, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [240]:
kmeans_tc.labels_
Out[240]:
array([1, 1, 2, 2, 2, 2, 3, 2, 2, 0, 1, 2, 3, 3, 0, 0, 0, 3, 3, 3, 0, 0,
       3, 0, 1, 1, 1, 1, 0, 2, 2, 0, 0, 3, 3, 3, 3, 3, 2, 2, 2, 0, 0, 0,
       1, 1, 1, 1, 1, 3, 3, 2, 2, 2, 0, 0, 3, 0, 0, 2, 2, 2, 0, 0, 0, 0,
       1, 1, 2, 2, 2, 1, 1, 1, 0, 2, 0, 2, 2, 2, 2, 0, 2, 0, 1, 0, 0, 0,
       3, 1, 2, 3, 0, 3, 3, 1, 3, 2, 3, 3, 2, 2, 2, 0, 2, 2, 2, 2, 3, 3,
       1, 2, 2, 2, 3, 0, 1, 3, 1, 2, 0, 3, 3, 3, 1, 3, 1, 1, 0, 0, 2, 2,
       3, 3, 2, 1, 1, 1, 1, 1, 3, 1, 3, 3, 0, 2, 3, 1, 0, 1, 1, 0, 1, 2,
       1, 1, 1, 2, 3, 2, 3, 1, 3, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 3, 1, 3,
       1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 2, 2, 1, 2, 1, 3, 1, 1, 0,
       0, 0, 3, 1, 1, 0, 0, 0, 0, 1, 3, 3, 3, 0, 0, 3, 3, 3, 1, 1, 3, 0,
       1, 2, 1, 1, 0, 0, 0, 0, 3, 2, 2, 1, 1, 1, 2, 0, 1, 0, 1, 3, 2, 2,
       0, 2, 2, 2, 1, 2, 3, 3, 2, 2, 2, 0, 2, 2, 3, 3, 0, 3, 3, 0, 0, 1,
       1, 3, 3, 3, 3, 3, 3, 1, 2, 2, 0, 2, 2, 2, 1, 1, 0, 1, 3, 3, 3, 3,
       0, 1, 1, 1, 1, 0, 2, 1, 1, 1, 2, 2, 2, 2, 1, 1, 3, 3, 1, 1, 1, 1,
       1, 0, 0, 3, 1, 0, 2])
In [241]:
# Predicting on the same data used for fitting reproduces `labels_`
# exactly (the two output arrays above/below are identical).
clusters_tc = kmeans_tc.predict(X)
clusters_tc
Out[241]:
array([1, 1, 2, 2, 2, 2, 3, 2, 2, 0, 1, 2, 3, 3, 0, 0, 0, 3, 3, 3, 0, 0,
       3, 0, 1, 1, 1, 1, 0, 2, 2, 0, 0, 3, 3, 3, 3, 3, 2, 2, 2, 0, 0, 0,
       1, 1, 1, 1, 1, 3, 3, 2, 2, 2, 0, 0, 3, 0, 0, 2, 2, 2, 0, 0, 0, 0,
       1, 1, 2, 2, 2, 1, 1, 1, 0, 2, 0, 2, 2, 2, 2, 0, 2, 0, 1, 0, 0, 0,
       3, 1, 2, 3, 0, 3, 3, 1, 3, 2, 3, 3, 2, 2, 2, 0, 2, 2, 2, 2, 3, 3,
       1, 2, 2, 2, 3, 0, 1, 3, 1, 2, 0, 3, 3, 3, 1, 3, 1, 1, 0, 0, 2, 2,
       3, 3, 2, 1, 1, 1, 1, 1, 3, 1, 3, 3, 0, 2, 3, 1, 0, 1, 1, 0, 1, 2,
       1, 1, 1, 2, 3, 2, 3, 1, 3, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 3, 1, 3,
       1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 2, 2, 1, 2, 1, 3, 1, 1, 0,
       0, 0, 3, 1, 1, 0, 0, 0, 0, 1, 3, 3, 3, 0, 0, 3, 3, 3, 1, 1, 3, 0,
       1, 2, 1, 1, 0, 0, 0, 0, 3, 2, 2, 1, 1, 1, 2, 0, 1, 0, 1, 3, 2, 2,
       0, 2, 2, 2, 1, 2, 3, 3, 2, 2, 2, 0, 2, 2, 3, 3, 0, 3, 3, 0, 0, 1,
       1, 3, 3, 3, 3, 3, 3, 1, 2, 2, 0, 2, 2, 2, 1, 1, 0, 1, 3, 3, 3, 3,
       0, 1, 1, 1, 1, 0, 2, 1, 1, 1, 2, 2, 2, 2, 1, 1, 3, 3, 1, 1, 1, 1,
       1, 0, 0, 3, 1, 0, 2])
In [242]:
# Append the cluster assignment and the target label to X for the
# cross-tabulation below. NOTE(review): this mutates the feature frame X
# in place; `list(y)` discards y's index, so this assumes X and y are
# positionally aligned — TODO confirm.
X.loc[:,'Cluster'] = clusters_tc
X.loc[:,'chosen'] = list(y)
In [243]:
X
Out[243]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6 Cluster chosen
0 1.585484 0.923309 -0.748807 1.209820 -1.073924 0.283035 1 0
1 1.129768 0.963814 0.209096 -0.143471 0.115184 0.020905 1 0
2 -0.066076 1.857866 1.921193 1.093619 2.089353 1.984310 2 0
3 0.119831 1.429286 1.472808 1.282029 1.096897 2.122871 2 0
4 -0.123292 0.197415 0.503797 1.431215 1.715761 0.683611 2 0
5 -0.553148 0.127288 2.842797 -1.267536 1.273635 2.271003 2 0
6 -0.391161 -0.277979 1.117190 0.713713 -1.700239 -0.587212 3 0
7 -0.538913 1.251767 0.804764 0.214490 -0.879193 0.306379 2 0
8 -0.032875 0.470932 -0.097587 0.648742 -1.031819 0.164361 2 0
9 0.212612 0.417050 0.248906 -1.495698 1.707020 -0.592557 0 0
10 0.503861 0.931556 -0.151041 -0.292255 -0.447486 -0.196969 1 0
11 0.279193 0.515238 0.967780 0.662445 0.209303 0.507757 2 0
12 0.049966 -0.352640 -0.595831 0.468652 -1.490626 0.103993 3 0
13 0.224941 -0.251218 0.462542 0.909846 -1.326305 -0.026245 3 0
14 -1.007562 -0.018176 -1.310793 -1.236878 1.573941 -1.709828 0 0
15 -0.156222 0.809961 0.891274 -0.560523 1.892119 -0.767684 0 0
16 -0.561577 0.649671 0.616195 -1.560389 1.268177 -1.094196 0 0
17 0.828743 -1.472369 0.354955 0.144804 -1.811789 -0.965157 3 0
18 0.520383 -1.982735 -0.524125 -0.227001 -1.732849 -1.131893 3 0
19 0.619988 -1.486445 0.400141 -0.931997 -2.408325 -0.039112 3 0
20 -1.718572 1.328228 -0.790084 -2.422930 -1.075452 -0.362318 0 0
21 -1.673788 -0.008250 -0.699254 -0.669847 0.358439 -1.911212 0 0
22 -1.660892 0.201931 -1.149618 -0.154362 -1.229344 -1.403777 3 0
23 0.467761 1.771253 -2.115824 -2.524369 -0.191251 0.283914 0 0
24 1.365177 1.009311 0.060605 -0.430112 -1.445585 0.147080 1 0
25 0.526073 -0.108881 -0.890367 0.453905 -1.553338 1.635917 1 0
26 0.396699 1.270857 -0.933936 -0.265038 -0.598090 1.282414 1 0
27 1.042904 1.615651 -1.342135 -1.108659 -0.228062 0.704441 1 0
28 0.488482 1.667605 -0.222726 -1.289806 0.833486 -0.007520 0 0
29 -0.206765 -0.088250 0.214115 0.280450 -0.032652 1.281632 2 0
... ... ... ... ... ... ... ... ...
285 0.761670 -1.885823 0.301159 -0.516737 -0.384864 -0.542683 3 1
286 2.233841 -0.021303 1.621452 -1.116993 0.705855 -1.623585 0 1
287 0.933521 0.065790 -1.295122 0.574358 -0.278402 2.277615 1 1
288 1.398839 -0.456314 -1.182173 0.348139 0.231267 1.399962 1 1
289 0.982720 0.097900 -0.814050 0.852544 0.308591 1.608777 1 1
290 0.959009 -1.443293 -0.329974 -0.253115 0.724219 -0.415649 1 1
291 -3.186710 0.207715 -1.442295 -0.713479 -0.644843 0.684665 0 1
292 -2.266002 0.208427 0.090970 0.014667 -0.927106 1.146918 2 1
293 1.473030 0.944250 -0.160216 0.323871 -0.664953 0.727193 1 1
294 2.116511 1.003706 -1.374891 -0.601957 -1.760610 0.014541 1 1
295 1.516890 0.883674 -1.850520 0.688076 -1.350999 0.620360 1 1
296 -0.171687 0.469515 0.407395 1.081823 -1.053878 -0.023872 2 1
297 -0.023957 0.051075 0.045786 0.108234 -0.643408 0.527902 2 1
298 0.152215 0.030843 0.217573 0.063538 -0.471831 0.840207 2 1
299 -0.286639 0.215830 -0.245963 0.927776 -0.474599 0.233343 2 1
300 0.411970 0.642559 -0.319323 1.141506 -0.291830 0.197165 1 1
301 0.915776 0.420715 -0.435877 0.621147 -0.746445 0.397040 1 1
302 -0.963660 -2.504276 0.149799 1.260867 0.289108 -0.386784 3 1
303 -0.732467 -1.137228 -0.806385 1.023830 -0.646676 -0.876828 3 1
304 0.175270 -2.037232 -1.136866 -0.405235 -0.294667 0.533651 1 1
305 1.507133 -2.022655 -0.176808 -2.220532 -0.028497 1.571387 1 1
306 3.112152 -1.221959 0.020285 -2.830754 -0.654301 0.750704 1 1
307 2.013689 -1.475971 -1.690376 -2.773483 -0.813379 2.885407 1 1
308 1.511606 0.675622 -0.743850 -1.491272 -0.807844 -0.418377 1 1
309 1.412105 1.763606 -0.378812 -1.313665 -0.185153 -0.847850 0 1
310 1.122395 0.814854 -0.854500 -1.402116 0.114392 -0.882142 0 1
311 0.336108 0.216775 0.080290 -0.047519 -0.756037 -1.071728 3 1
312 1.201358 0.783381 -0.221010 0.308862 -1.206838 -0.690837 1 1
313 -0.672924 -0.268212 1.143994 -0.147440 2.008975 -0.652644 0 1
314 -0.034368 1.050289 0.424944 0.805168 -0.376947 -0.658873 2 1

315 rows × 8 columns

In [244]:
# Cross-tabulate cluster membership against the 'chosen' label and draw a
# stacked bar chart: one bar per cluster, split into chosen = 0 / 1 counts.
# Fixes: added title and axis labels so the figure stands alone.
stacked = X.groupby(['chosen','Cluster']).size().reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
ax = pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(10,7))
ax.set_xlabel('Cluster')
ax.set_ylabel('Count')
ax.set_title('Chosen (1) vs. not chosen (0) per cluster')
Out[244]:
<matplotlib.axes._subplots.AxesSubplot at 0x1b829d7d748>
In [245]:
# Render the current company name as a level-2 markdown heading.
from IPython.display import display, Markdown, Latex
display(Markdown(f'## {companies[1]}'))

Club De Banqueros y Empresarios

ANN

In [150]:
X = df_n_ps_std_tc[1]
In [151]:
y = df_n_ps[1]['chosen']
In [152]:
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [153]:
X_train.shape
Out[153]:
(191, 6)
In [154]:
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [155]:
# Candidate hyper-parameter values for the MLP grid search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]  # defined but left out of the grid (see commented entry below)
In [156]:
import time
start = time.time() # current time in seconds since Jan 1, 1970 (reference point for timing the search)

# Seed numpy's global RNG for reproducibility of the stochastic MLP training.
np.random.seed(1234)
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track both Cohen's kappa and accuracy; refit the best model by accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): iid=True was deprecated in scikit-learn 0.22 and removed in
# 0.24 — confirm the pinned sklearn version before upgrading.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [253]:
# Run the exhaustive grid search (~31 minutes per the recorded output),
# then report the best parameters with their accuracy and kappa.
grid.fit(X_train, y_train)

print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # time after the model search finished
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'relu', 'hidden_layer_sizes': (20,), 'learning_rate_init': 0.002, 'max_iter': 200}, que permiten obtener un Accuracy de 78.01% y un Kappa del 31.63
Tiempo total: 31.35 minutos
In [157]:
# Hard-code the grid-search winner so the notebook can be re-run without
# repeating the ~31-minute search, then unpack the values the Keras model needs.
# Improvement: manual index loop over hidden_layer_sizes replaced with list().
grid.best_params_ = {'activation': 'relu', 'hidden_layer_sizes': (20,), 'learning_rate_init': 0.002, 'max_iter': 200}
n0 = X_train.shape[1]  # input dimension
# Layer widths: tuned hidden sizes plus a single output unit.
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)
lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [158]:
input_tensor = Input(shape = (n0,))
In [159]:
# Chain the tuned Dense layers functionally: each hidden layer consumes the
# previous layer's output; a final 1-unit sigmoid layer yields the
# binary-classification probability.
hidden_outputs = [input_tensor]
for width in ns[:-1]:
    hidden_outputs.append(
        Dense(width, activation=grid.best_params_['activation'])(hidden_outputs[-1])
    )

classification_output = Dense(ns[-1], activation='sigmoid')(hidden_outputs[-1])
In [160]:
# Build the model and snapshot its freshly-initialized weights so training
# can later be restarted from the same starting point via set_weights().
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [161]:
model.summary()
Model: "model_8"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_9 (InputLayer)         (None, 6)                 0         
_________________________________________________________________
dense_24 (Dense)             (None, 20)                140       
_________________________________________________________________
dense_25 (Dense)             (None, 1)                 21        
=================================================================
Total params: 161
Trainable params: 161
Non-trainable params: 0
_________________________________________________________________
In [162]:
# Restore the initial random weights so training starts from the same point
# each time this cell is re-run.
model.set_weights(weights)
adam = keras.optimizers.Adam(lr=lr)  # NOTE(review): `lr` is the legacy arg name; newer Keras uses `learning_rate`
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Train for the tuned epoch count; halve the learning rate whenever
# val_accuracy fails to improve by >= 0.01 for 10 consecutive epochs.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test),
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 191 samples, validate on 64 samples
Epoch 1/200
191/191 [==============================] - 0s 853us/step - loss: 0.7042 - accuracy: 0.5864 - val_loss: 0.7027 - val_accuracy: 0.6094
Epoch 2/200
191/191 [==============================] - 0s 63us/step - loss: 0.6768 - accuracy: 0.6126 - val_loss: 0.6629 - val_accuracy: 0.6562
Epoch 3/200
191/191 [==============================] - 0s 58us/step - loss: 0.6551 - accuracy: 0.6545 - val_loss: 0.6287 - val_accuracy: 0.6875
Epoch 4/200
191/191 [==============================] - 0s 52us/step - loss: 0.6361 - accuracy: 0.6545 - val_loss: 0.5994 - val_accuracy: 0.6719
Epoch 5/200
191/191 [==============================] - 0s 63us/step - loss: 0.6194 - accuracy: 0.6492 - val_loss: 0.5762 - val_accuracy: 0.6719
Epoch 6/200
191/191 [==============================] - 0s 58us/step - loss: 0.6084 - accuracy: 0.6806 - val_loss: 0.5564 - val_accuracy: 0.7031
Epoch 7/200
191/191 [==============================] - 0s 58us/step - loss: 0.5988 - accuracy: 0.6859 - val_loss: 0.5405 - val_accuracy: 0.7344
Epoch 8/200
191/191 [==============================] - 0s 58us/step - loss: 0.5920 - accuracy: 0.6754 - val_loss: 0.5273 - val_accuracy: 0.7500
Epoch 9/200
191/191 [==============================] - 0s 52us/step - loss: 0.5862 - accuracy: 0.6806 - val_loss: 0.5176 - val_accuracy: 0.7500
Epoch 10/200
191/191 [==============================] - 0s 63us/step - loss: 0.5817 - accuracy: 0.6859 - val_loss: 0.5105 - val_accuracy: 0.7344
Epoch 11/200
191/191 [==============================] - 0s 63us/step - loss: 0.5775 - accuracy: 0.6911 - val_loss: 0.5053 - val_accuracy: 0.7500
Epoch 12/200
191/191 [==============================] - 0s 63us/step - loss: 0.5749 - accuracy: 0.6911 - val_loss: 0.5011 - val_accuracy: 0.7656
Epoch 13/200
191/191 [==============================] - 0s 58us/step - loss: 0.5722 - accuracy: 0.6911 - val_loss: 0.4985 - val_accuracy: 0.7656
Epoch 14/200
191/191 [==============================] - 0s 52us/step - loss: 0.5697 - accuracy: 0.6911 - val_loss: 0.4965 - val_accuracy: 0.7656
Epoch 15/200
191/191 [==============================] - 0s 63us/step - loss: 0.5678 - accuracy: 0.6911 - val_loss: 0.4948 - val_accuracy: 0.7656
Epoch 16/200
191/191 [==============================] - 0s 63us/step - loss: 0.5658 - accuracy: 0.7016 - val_loss: 0.4946 - val_accuracy: 0.7500
Epoch 17/200
191/191 [==============================] - 0s 68us/step - loss: 0.5638 - accuracy: 0.7016 - val_loss: 0.4939 - val_accuracy: 0.7500
Epoch 18/200
191/191 [==============================] - 0s 63us/step - loss: 0.5620 - accuracy: 0.7068 - val_loss: 0.4932 - val_accuracy: 0.7344
Epoch 19/200
191/191 [==============================] - 0s 63us/step - loss: 0.5604 - accuracy: 0.7016 - val_loss: 0.4928 - val_accuracy: 0.7344
Epoch 20/200
191/191 [==============================] - 0s 58us/step - loss: 0.5590 - accuracy: 0.7016 - val_loss: 0.4920 - val_accuracy: 0.7500
Epoch 21/200
191/191 [==============================] - 0s 63us/step - loss: 0.5571 - accuracy: 0.7068 - val_loss: 0.4925 - val_accuracy: 0.7500
Epoch 22/200
191/191 [==============================] - 0s 73us/step - loss: 0.5554 - accuracy: 0.7016 - val_loss: 0.4933 - val_accuracy: 0.7344

Epoch 00022: ReduceLROnPlateau reducing learning rate to 0.0010000000474974513.
Epoch 23/200
191/191 [==============================] - 0s 99us/step - loss: 0.5542 - accuracy: 0.7068 - val_loss: 0.4932 - val_accuracy: 0.7500
Epoch 24/200
191/191 [==============================] - 0s 68us/step - loss: 0.5533 - accuracy: 0.7068 - val_loss: 0.4938 - val_accuracy: 0.7500
Epoch 25/200
191/191 [==============================] - 0s 68us/step - loss: 0.5526 - accuracy: 0.7120 - val_loss: 0.4940 - val_accuracy: 0.7500
Epoch 26/200
191/191 [==============================] - 0s 58us/step - loss: 0.5518 - accuracy: 0.7173 - val_loss: 0.4940 - val_accuracy: 0.7500
Epoch 27/200
191/191 [==============================] - 0s 63us/step - loss: 0.5510 - accuracy: 0.7277 - val_loss: 0.4944 - val_accuracy: 0.7500
Epoch 28/200
191/191 [==============================] - 0s 68us/step - loss: 0.5502 - accuracy: 0.7277 - val_loss: 0.4949 - val_accuracy: 0.7500
Epoch 29/200
191/191 [==============================] - 0s 73us/step - loss: 0.5495 - accuracy: 0.7277 - val_loss: 0.4951 - val_accuracy: 0.7500
Epoch 30/200
191/191 [==============================] - 0s 68us/step - loss: 0.5487 - accuracy: 0.7277 - val_loss: 0.4953 - val_accuracy: 0.7500
Epoch 31/200
191/191 [==============================] - 0s 78us/step - loss: 0.5482 - accuracy: 0.7277 - val_loss: 0.4954 - val_accuracy: 0.7500
Epoch 32/200
191/191 [==============================] - 0s 68us/step - loss: 0.5475 - accuracy: 0.7225 - val_loss: 0.4960 - val_accuracy: 0.7500

Epoch 00032: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.
Epoch 33/200
191/191 [==============================] - 0s 63us/step - loss: 0.5467 - accuracy: 0.7173 - val_loss: 0.4960 - val_accuracy: 0.7500
Epoch 34/200
191/191 [==============================] - 0s 63us/step - loss: 0.5464 - accuracy: 0.7173 - val_loss: 0.4959 - val_accuracy: 0.7500
Epoch 35/200
191/191 [==============================] - 0s 52us/step - loss: 0.5461 - accuracy: 0.7120 - val_loss: 0.4961 - val_accuracy: 0.7500
Epoch 36/200
191/191 [==============================] - 0s 58us/step - loss: 0.5457 - accuracy: 0.7120 - val_loss: 0.4961 - val_accuracy: 0.7500
Epoch 37/200
191/191 [==============================] - 0s 63us/step - loss: 0.5453 - accuracy: 0.7068 - val_loss: 0.4964 - val_accuracy: 0.7500
Epoch 38/200
191/191 [==============================] - 0s 68us/step - loss: 0.5450 - accuracy: 0.7068 - val_loss: 0.4964 - val_accuracy: 0.7500
Epoch 39/200
191/191 [==============================] - 0s 63us/step - loss: 0.5446 - accuracy: 0.7068 - val_loss: 0.4964 - val_accuracy: 0.7500
Epoch 40/200
191/191 [==============================] - 0s 58us/step - loss: 0.5444 - accuracy: 0.7120 - val_loss: 0.4966 - val_accuracy: 0.7500
Epoch 41/200
191/191 [==============================] - 0s 52us/step - loss: 0.5439 - accuracy: 0.7173 - val_loss: 0.4965 - val_accuracy: 0.7500
Epoch 42/200
191/191 [==============================] - 0s 63us/step - loss: 0.5436 - accuracy: 0.7120 - val_loss: 0.4968 - val_accuracy: 0.7500

Epoch 00042: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
Epoch 43/200
191/191 [==============================] - 0s 68us/step - loss: 0.5432 - accuracy: 0.7120 - val_loss: 0.4968 - val_accuracy: 0.7500
Epoch 44/200
191/191 [==============================] - 0s 68us/step - loss: 0.5431 - accuracy: 0.7173 - val_loss: 0.4968 - val_accuracy: 0.7500
Epoch 45/200
191/191 [==============================] - 0s 68us/step - loss: 0.5429 - accuracy: 0.7173 - val_loss: 0.4970 - val_accuracy: 0.7500
Epoch 46/200
191/191 [==============================] - 0s 68us/step - loss: 0.5427 - accuracy: 0.7173 - val_loss: 0.4970 - val_accuracy: 0.7500
Epoch 47/200
191/191 [==============================] - 0s 68us/step - loss: 0.5425 - accuracy: 0.7225 - val_loss: 0.4970 - val_accuracy: 0.7500
Epoch 48/200
191/191 [==============================] - 0s 63us/step - loss: 0.5424 - accuracy: 0.7225 - val_loss: 0.4970 - val_accuracy: 0.7500
Epoch 49/200
191/191 [==============================] - 0s 94us/step - loss: 0.5422 - accuracy: 0.7225 - val_loss: 0.4971 - val_accuracy: 0.7500
Epoch 50/200
191/191 [==============================] - 0s 89us/step - loss: 0.5420 - accuracy: 0.7225 - val_loss: 0.4970 - val_accuracy: 0.7500
Epoch 51/200
191/191 [==============================] - 0s 89us/step - loss: 0.5419 - accuracy: 0.7225 - val_loss: 0.4972 - val_accuracy: 0.7500
Epoch 52/200
191/191 [==============================] - 0s 89us/step - loss: 0.5417 - accuracy: 0.7225 - val_loss: 0.4974 - val_accuracy: 0.7500

Epoch 00052: ReduceLROnPlateau reducing learning rate to 0.0001250000059371814.
Epoch 53/200
191/191 [==============================] - 0s 84us/step - loss: 0.5415 - accuracy: 0.7277 - val_loss: 0.4975 - val_accuracy: 0.7500
Epoch 54/200
191/191 [==============================] - 0s 84us/step - loss: 0.5414 - accuracy: 0.7277 - val_loss: 0.4974 - val_accuracy: 0.7500
Epoch 55/200
191/191 [==============================] - 0s 115us/step - loss: 0.5413 - accuracy: 0.7277 - val_loss: 0.4974 - val_accuracy: 0.7500
Epoch 56/200
191/191 [==============================] - 0s 87us/step - loss: 0.5412 - accuracy: 0.7277 - val_loss: 0.4974 - val_accuracy: 0.7500
Epoch 57/200
191/191 [==============================] - 0s 107us/step - loss: 0.5412 - accuracy: 0.7277 - val_loss: 0.4974 - val_accuracy: 0.7500
Epoch 58/200
191/191 [==============================] - 0s 89us/step - loss: 0.5411 - accuracy: 0.7277 - val_loss: 0.4974 - val_accuracy: 0.7500
Epoch 59/200
191/191 [==============================] - 0s 78us/step - loss: 0.5410 - accuracy: 0.7277 - val_loss: 0.4975 - val_accuracy: 0.7500
Epoch 60/200
191/191 [==============================] - ETA: 0s - loss: 0.5595 - accuracy: 0.71 - 0s 78us/step - loss: 0.5409 - accuracy: 0.7277 - val_loss: 0.4975 - val_accuracy: 0.7500
Epoch 61/200
191/191 [==============================] - 0s 84us/step - loss: 0.5408 - accuracy: 0.7277 - val_loss: 0.4976 - val_accuracy: 0.7500
Epoch 62/200
191/191 [==============================] - 0s 84us/step - loss: 0.5407 - accuracy: 0.7277 - val_loss: 0.4976 - val_accuracy: 0.7500

Epoch 00062: ReduceLROnPlateau reducing learning rate to 6.25000029685907e-05.
Epoch 63/200
191/191 [==============================] - 0s 89us/step - loss: 0.5407 - accuracy: 0.7277 - val_loss: 0.4976 - val_accuracy: 0.7500
Epoch 64/200
191/191 [==============================] - 0s 89us/step - loss: 0.5406 - accuracy: 0.7277 - val_loss: 0.4976 - val_accuracy: 0.7500
Epoch 65/200
191/191 [==============================] - 0s 89us/step - loss: 0.5406 - accuracy: 0.7277 - val_loss: 0.4976 - val_accuracy: 0.7500
Epoch 66/200
191/191 [==============================] - 0s 73us/step - loss: 0.5405 - accuracy: 0.7277 - val_loss: 0.4976 - val_accuracy: 0.7500
Epoch 67/200
191/191 [==============================] - 0s 131us/step - loss: 0.5405 - accuracy: 0.7277 - val_loss: 0.4977 - val_accuracy: 0.7500
Epoch 68/200
191/191 [==============================] - 0s 89us/step - loss: 0.5405 - accuracy: 0.7277 - val_loss: 0.4977 - val_accuracy: 0.7500
Epoch 69/200
191/191 [==============================] - 0s 89us/step - loss: 0.5404 - accuracy: 0.7277 - val_loss: 0.4977 - val_accuracy: 0.7500
Epoch 70/200
191/191 [==============================] - 0s 89us/step - loss: 0.5404 - accuracy: 0.7277 - val_loss: 0.4977 - val_accuracy: 0.7500
Epoch 71/200
191/191 [==============================] - 0s 89us/step - loss: 0.5403 - accuracy: 0.7277 - val_loss: 0.4977 - val_accuracy: 0.7500
Epoch 72/200
191/191 [==============================] - 0s 73us/step - loss: 0.5403 - accuracy: 0.7277 - val_loss: 0.4977 - val_accuracy: 0.7500

Epoch 00072: ReduceLROnPlateau reducing learning rate to 3.125000148429535e-05.
Epoch 73/200
191/191 [==============================] - 0s 84us/step - loss: 0.5403 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 74/200
191/191 [==============================] - 0s 84us/step - loss: 0.5402 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 75/200
191/191 [==============================] - 0s 84us/step - loss: 0.5402 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 76/200
191/191 [==============================] - 0s 94us/step - loss: 0.5402 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 77/200
191/191 [==============================] - 0s 84us/step - loss: 0.5402 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 78/200
191/191 [==============================] - 0s 78us/step - loss: 0.5402 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 79/200
191/191 [==============================] - 0s 84us/step - loss: 0.5401 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 80/200
191/191 [==============================] - 0s 89us/step - loss: 0.5401 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 81/200
191/191 [==============================] - 0s 89us/step - loss: 0.5401 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 82/200
191/191 [==============================] - 0s 84us/step - loss: 0.5401 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500

Epoch 00082: ReduceLROnPlateau reducing learning rate to 1.5625000742147677e-05.
Epoch 83/200
191/191 [==============================] - 0s 78us/step - loss: 0.5401 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 84/200
191/191 [==============================] - 0s 78us/step - loss: 0.5401 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 85/200
191/191 [==============================] - 0s 84us/step - loss: 0.5400 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 86/200
191/191 [==============================] - 0s 99us/step - loss: 0.5400 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 87/200
191/191 [==============================] - 0s 84us/step - loss: 0.5400 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 88/200
191/191 [==============================] - 0s 94us/step - loss: 0.5400 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 89/200
191/191 [==============================] - 0s 84us/step - loss: 0.5400 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 90/200
191/191 [==============================] - 0s 110us/step - loss: 0.5400 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 91/200
191/191 [==============================] - 0s 89us/step - loss: 0.5400 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 92/200
191/191 [==============================] - 0s 84us/step - loss: 0.5400 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500

Epoch 00092: ReduceLROnPlateau reducing learning rate to 7.812500371073838e-06.
Epoch 93/200
191/191 [==============================] - 0s 84us/step - loss: 0.5400 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 94/200
191/191 [==============================] - 0s 89us/step - loss: 0.5400 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 95/200
191/191 [==============================] - 0s 78us/step - loss: 0.5400 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 96/200
191/191 [==============================] - 0s 89us/step - loss: 0.5400 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 97/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 98/200
191/191 [==============================] - 0s 94us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4978 - val_accuracy: 0.7500
Epoch 99/200
191/191 [==============================] - 0s 99us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 100/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 101/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 102/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500

Epoch 00102: ReduceLROnPlateau reducing learning rate to 3.906250185536919e-06.
Epoch 103/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 104/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 105/200
191/191 [==============================] - 0s 94us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 106/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 107/200
191/191 [==============================] - 0s 110us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 108/200
191/191 [==============================] - 0s 78us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 109/200
191/191 [==============================] - 0s 99us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 110/200
191/191 [==============================] - 0s 99us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 111/200
191/191 [==============================] - 0s 94us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 112/200
191/191 [==============================] - 0s 78us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500

Epoch 00112: ReduceLROnPlateau reducing learning rate to 1.9531250927684596e-06.
Epoch 113/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 114/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 115/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 116/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 117/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 118/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 119/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 120/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 121/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 122/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500

Epoch 00122: ReduceLROnPlateau reducing learning rate to 9.765625463842298e-07.
Epoch 123/200
191/191 [==============================] - 0s 78us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 124/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 125/200
191/191 [==============================] - 0s 78us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 126/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 127/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 128/200
191/191 [==============================] - 0s 78us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 129/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 130/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 131/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 132/200
191/191 [==============================] - 0s 78us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500

Epoch 00132: ReduceLROnPlateau reducing learning rate to 4.882812731921149e-07.
Epoch 133/200
191/191 [==============================] - 0s 78us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 134/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 135/200
191/191 [==============================] - 0s 78us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 136/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 137/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 138/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 139/200
191/191 [==============================] - 0s 94us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 140/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 141/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 142/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500

Epoch 00142: ReduceLROnPlateau reducing learning rate to 2.4414063659605745e-07.
Epoch 143/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 144/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 145/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 146/200
191/191 [==============================] - 0s 99us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 147/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 148/200
191/191 [==============================] - 0s 99us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 149/200
191/191 [==============================] - 0s 115us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 150/200
191/191 [==============================] - 0s 120us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 151/200
191/191 [==============================] - 0s 94us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 152/200
191/191 [==============================] - 0s 94us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500

Epoch 00152: ReduceLROnPlateau reducing learning rate to 1.2207031829802872e-07.
Epoch 153/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 154/200
191/191 [==============================] - 0s 94us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 155/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 156/200
191/191 [==============================] - 0s 105us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 157/200
191/191 [==============================] - 0s 94us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 158/200
191/191 [==============================] - 0s 94us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 159/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 160/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 161/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 162/200
191/191 [==============================] - 0s 99us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500

Epoch 00162: ReduceLROnPlateau reducing learning rate to 6.103515914901436e-08.
Epoch 163/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 164/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 165/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 166/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 167/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 168/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 169/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 170/200
191/191 [==============================] - 0s 99us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 171/200
191/191 [==============================] - 0s 105us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 172/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500

Epoch 00172: ReduceLROnPlateau reducing learning rate to 3.051757957450718e-08.
Epoch 173/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 174/200
191/191 [==============================] - 0s 99us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 175/200
191/191 [==============================] - 0s 94us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 176/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 177/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 178/200
191/191 [==============================] - 0s 94us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 179/200
191/191 [==============================] - 0s 99us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 180/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 181/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 182/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500

Epoch 00182: ReduceLROnPlateau reducing learning rate to 1.525878978725359e-08.
Epoch 183/200
191/191 [==============================] - 0s 78us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 184/200
191/191 [==============================] - 0s 183us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 185/200
191/191 [==============================] - 0s 94us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 186/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 187/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 188/200
191/191 [==============================] - 0s 105us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 189/200
191/191 [==============================] - 0s 78us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 190/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 191/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 192/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500

Epoch 00192: ReduceLROnPlateau reducing learning rate to 7.629394893626795e-09.
Epoch 193/200
191/191 [==============================] - 0s 78us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 194/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 195/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 196/200
191/191 [==============================] - 0s 94us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 197/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 198/200
191/191 [==============================] - 0s 78us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 199/200
191/191 [==============================] - 0s 89us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
Epoch 200/200
191/191 [==============================] - 0s 84us/step - loss: 0.5399 - accuracy: 0.7277 - val_loss: 0.4979 - val_accuracy: 0.7500
In [163]:
# Plot the Keras training history: accuracy and loss curves,
# training (dots) vs. validation (solid line).
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
range(0, 200)
In [164]:
# Evaluate the trained network on the held-out test split.
test_loss, test_acc = model.evaluate(X_test, y_test)
print(f"test loss: {test_loss}, test accuracy: {test_acc}")
64/64 [==============================] - 0s 94us/step
test loss: 0.49787381291389465, test accuracy: 0.75
In [165]:
# Score ranking quality of the raw sigmoid probabilities on the test set.
y_pred = model.predict(X_test)
auc = roc_auc_score(y_test, y_pred)
print("AUC ROC: ", auc)
AUC ROC:  0.6072041166380788
In [166]:
# Binarize the predicted probabilities at the 0.5 threshold, then report
# chance-corrected agreement (kappa) and the confusion matrix.
y_pred = [int(p >= 0.5) for p in y_pred]
print("Kappa: ", cohen_kappa_score(y_test, y_pred))

print(confusion_matrix(y_test, y_pred))
Kappa:  0.05360443622920519
[[46  7]
 [ 9  2]]

KMeans

In [264]:
X
Out[264]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6
0 0.898091 0.151819 -1.172713 0.474387 -0.020230 1.228657
1 0.618513 -0.762588 0.061946 0.944076 0.697880 0.021150
2 0.685649 0.002933 0.719805 -1.251700 -0.952424 1.444556
3 1.175209 -0.552349 0.336427 0.482978 -0.212146 -0.144225
4 1.350337 -1.407757 0.258917 -0.523670 0.099306 1.706064
5 0.907564 -1.769301 1.177857 -0.869472 0.392594 0.385760
6 -0.071420 -0.800769 0.238726 1.318866 -1.075628 -0.545006
7 0.476433 -1.202140 -1.713665 0.379487 -0.347674 0.777899
8 0.572039 -1.488738 -0.403914 -1.066061 -0.818836 0.339231
9 0.741137 0.139987 0.726307 1.670135 -0.317435 -1.091941
10 0.533655 -0.111619 0.435253 1.832919 -0.556933 -1.014603
11 -0.667308 0.502566 -1.137726 -0.714521 -0.497571 0.123297
12 0.161812 0.294263 0.659166 -0.336211 1.410350 -0.272418
13 -0.373777 -1.439681 0.009190 0.731635 0.138615 0.850511
14 0.745550 0.214669 0.209787 0.424963 0.448908 -0.204578
15 0.320726 0.108060 0.208510 -1.138882 -0.874041 -1.779091
16 0.646392 -0.726119 0.153724 -0.203580 -1.017329 -1.068601
17 -0.042981 -0.672256 0.358250 -0.385808 -0.341018 -1.823744
18 0.822192 0.184879 1.658679 1.705929 3.070140 -1.218005
19 0.175070 0.195153 1.969940 0.005043 0.430538 -1.502715
20 1.339692 -1.202498 0.487937 -0.769520 -1.973308 -0.400699
21 1.290923 -0.546138 0.120024 0.429258 -0.165681 0.856938
22 1.528224 -0.912727 0.962682 -0.386673 -0.772181 -0.291766
23 -0.486779 -1.124424 0.559106 0.746533 -1.101240 1.082216
24 -0.230729 0.999926 -0.678209 -0.175670 1.412258 0.572372
25 -0.632681 0.618852 -0.778803 -0.808112 -0.442115 -0.146177
26 -1.151505 -1.127449 1.500641 -0.822825 0.158380 0.792656
27 0.265739 -3.078847 -0.939567 0.268673 -0.642098 -0.984495
28 0.623357 -1.241561 -1.149654 1.231993 2.023015 -0.070476
29 0.930863 -1.763587 -1.608926 0.462097 -0.677599 -0.693427
... ... ... ... ... ... ...
225 -1.444140 -0.088370 -0.458428 0.530251 -0.475625 -0.057486
226 -0.297006 0.887935 0.467148 2.000374 -0.396849 -0.846195
227 -1.624166 0.777486 0.635044 -1.376180 0.998008 -0.910882
228 0.230618 1.438780 0.301556 -1.353873 -0.586627 -0.102947
229 -0.163123 1.329205 0.721279 -1.383030 0.540446 -1.181571
230 -1.337576 0.249897 0.081067 0.886335 -0.078090 -0.344245
231 0.304553 0.584052 0.915910 2.455180 1.007231 0.268298
232 -0.291785 0.247731 -0.740382 0.896773 0.457951 0.390640
233 -0.532056 1.686101 0.358185 -1.561985 0.911246 0.638759
234 -1.223692 0.723005 0.599197 -0.955626 0.653814 0.112686
235 1.412552 -0.817418 0.038464 -2.397710 -2.903923 1.454325
236 0.141392 -0.756740 -1.981390 -0.636588 0.230786 0.968907
237 1.157567 -0.442417 -1.342532 -0.893118 -0.552517 -0.791388
238 -1.683225 -0.036571 0.297162 -1.488549 1.387872 -0.306946
239 -0.997159 0.655257 2.239993 -1.422875 0.373101 0.159004
240 -1.142741 0.931927 1.440876 0.665641 -0.994237 -1.093039
241 -0.151675 -0.971306 0.447819 0.895444 -0.863907 0.150120
242 -0.837654 -1.170592 0.622658 0.448216 -0.830715 -0.222067
243 -0.059101 -0.857751 0.253657 0.272951 -0.833270 0.160823
244 1.455210 -1.123798 1.124970 -1.841854 -0.183521 -0.193778
245 1.459407 -1.071308 -0.261053 -0.731205 0.603463 0.358072
246 1.850117 -1.364586 1.015519 -1.479941 -1.262489 -0.485304
247 0.468703 0.776904 -1.200084 -0.109459 0.572206 0.353229
248 0.758187 -0.030802 -1.190930 -0.092637 0.048267 2.174173
249 0.465492 -0.042081 0.541343 0.584645 0.066443 -1.886670
250 -1.114193 1.666162 0.201458 -1.543125 -0.123758 -0.430641
251 -1.675129 1.101864 0.721966 -1.964153 0.827116 0.134812
252 -1.371728 0.888874 -0.186673 -0.931346 0.795500 -1.063218
253 0.221249 0.272024 -1.593712 -0.242394 0.752955 1.102656
254 -0.747040 1.308435 0.858494 -1.950134 1.779312 -0.711789

255 rows × 6 columns

In [265]:
# Elbow method: within-cluster sum of squares (KMeans inertia) for k = 1..14.
WSSs = []
for n_clusters in range(1, 15):
    km = KMeans(n_clusters=n_clusters, random_state=0)
    km.fit(X)
    WSSs.append(km.inertia_)
WSSs
Out[265]:
[1530.0000000000002,
 1266.8988304034983,
 1085.4171102625123,
 963.5827926636907,
 872.5239995069635,
 797.6140851961846,
 747.1323294070899,
 703.670300371115,
 664.3614627122823,
 637.5590430281768,
 607.7011770650902,
 585.4389967082509,
 558.8506960652073,
 540.5660329891642]
In [266]:
# Elbow plot: look for the "knee" in inertia vs. number of clusters
# (k = 3 is chosen below).
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)
Out[266]:
[<matplotlib.lines.Line2D at 0x1b829e4f2e8>]

K=3

In [267]:
# Fit the final K-means model with the k = 3 picked from the elbow plot;
# the bare trailing expression displays the fitted estimator's repr.
kmeans_tc = KMeans(n_clusters=3, random_state=0, n_init=10).fit(X)
kmeans_tc
Out[267]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=3, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [268]:
# Cluster assignment (0, 1, or 2) for each row of X.
kmeans_tc.labels_
Out[268]:
array([2, 0, 2, 0, 2, 0, 0, 2, 2, 0, 0, 2, 1, 0, 0, 1, 0, 0, 0, 0, 0, 2,
       0, 0, 1, 1, 0, 0, 0, 2, 2, 0, 0, 1, 1, 0, 2, 2, 2, 2, 2, 0, 0, 0,
       0, 2, 2, 0, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 2, 2, 2, 2, 0,
       1, 1, 1, 1, 1, 0, 2, 0, 0, 2, 2, 2, 0, 0, 2, 2, 0, 2, 0, 1, 0, 1,
       1, 1, 2, 2, 2, 0, 0, 0, 2, 1, 2, 1, 0, 2, 0, 2, 2, 1, 1, 2, 1, 1,
       1, 0, 1, 2, 0, 2, 2, 1, 1, 1, 2, 2, 0, 2, 2, 2, 1, 0, 2, 1, 0, 0,
       2, 2, 2, 2, 1, 1, 1, 2, 0, 0, 0, 0, 1, 0, 2, 1, 2, 2, 2, 2, 1, 1,
       1, 1, 2, 1, 1, 1, 2, 2, 1, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 1, 2, 2,
       1, 2, 0, 1, 1, 2, 2, 2, 2, 0, 0, 2, 2, 1, 0, 2, 1, 1, 1, 2, 1, 2,
       2, 2, 1, 1, 2, 2, 1, 2, 0, 2, 0, 2, 2, 0, 0, 0, 0, 0, 2, 2, 0, 2,
       0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 2, 1, 1, 2, 2, 2, 1, 1, 1, 0,
       0, 0, 0, 2, 0, 2, 2, 0, 1, 1, 1, 2, 1])
In [269]:
# NOTE(review): predicting on the same data the model was fit on yields the
# same assignments as kmeans_tc.labels_ (Out[268] and Out[269] are identical);
# kept, presumably to mirror how new data would be assigned.
clusters_tc = kmeans_tc.predict(X)
clusters_tc
Out[269]:
array([2, 0, 2, 0, 2, 0, 0, 2, 2, 0, 0, 2, 1, 0, 0, 1, 0, 0, 0, 0, 0, 2,
       0, 0, 1, 1, 0, 0, 0, 2, 2, 0, 0, 1, 1, 0, 2, 2, 2, 2, 2, 0, 0, 0,
       0, 2, 2, 0, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 2, 2, 2, 2, 0,
       1, 1, 1, 1, 1, 0, 2, 0, 0, 2, 2, 2, 0, 0, 2, 2, 0, 2, 0, 1, 0, 1,
       1, 1, 2, 2, 2, 0, 0, 0, 2, 1, 2, 1, 0, 2, 0, 2, 2, 1, 1, 2, 1, 1,
       1, 0, 1, 2, 0, 2, 2, 1, 1, 1, 2, 2, 0, 2, 2, 2, 1, 0, 2, 1, 0, 0,
       2, 2, 2, 2, 1, 1, 1, 2, 0, 0, 0, 0, 1, 0, 2, 1, 2, 2, 2, 2, 1, 1,
       1, 1, 2, 1, 1, 1, 2, 2, 1, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 1, 2, 2,
       1, 2, 0, 1, 1, 2, 2, 2, 2, 0, 0, 2, 2, 1, 0, 2, 1, 1, 1, 2, 1, 2,
       2, 2, 1, 1, 2, 2, 1, 2, 0, 2, 0, 2, 2, 0, 0, 0, 0, 0, 2, 2, 0, 2,
       0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 2, 1, 1, 2, 2, 2, 1, 1, 1, 0,
       0, 0, 0, 2, 0, 2, 2, 0, 1, 1, 1, 2, 1])
In [270]:
# Attach the cluster assignment and the target label to the feature frame
# so they can be cross-tabulated below. list(y) discards y's index so the
# values are assigned positionally.
X['Cluster'] = clusters_tc
X['chosen'] = list(y)
In [271]:
X
Out[271]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6 Cluster chosen
0 0.898091 0.151819 -1.172713 0.474387 -0.020230 1.228657 2 0
1 0.618513 -0.762588 0.061946 0.944076 0.697880 0.021150 0 0
2 0.685649 0.002933 0.719805 -1.251700 -0.952424 1.444556 2 0
3 1.175209 -0.552349 0.336427 0.482978 -0.212146 -0.144225 0 0
4 1.350337 -1.407757 0.258917 -0.523670 0.099306 1.706064 2 0
5 0.907564 -1.769301 1.177857 -0.869472 0.392594 0.385760 0 0
6 -0.071420 -0.800769 0.238726 1.318866 -1.075628 -0.545006 0 0
7 0.476433 -1.202140 -1.713665 0.379487 -0.347674 0.777899 2 0
8 0.572039 -1.488738 -0.403914 -1.066061 -0.818836 0.339231 2 0
9 0.741137 0.139987 0.726307 1.670135 -0.317435 -1.091941 0 0
10 0.533655 -0.111619 0.435253 1.832919 -0.556933 -1.014603 0 0
11 -0.667308 0.502566 -1.137726 -0.714521 -0.497571 0.123297 2 0
12 0.161812 0.294263 0.659166 -0.336211 1.410350 -0.272418 1 0
13 -0.373777 -1.439681 0.009190 0.731635 0.138615 0.850511 0 0
14 0.745550 0.214669 0.209787 0.424963 0.448908 -0.204578 0 0
15 0.320726 0.108060 0.208510 -1.138882 -0.874041 -1.779091 1 0
16 0.646392 -0.726119 0.153724 -0.203580 -1.017329 -1.068601 0 0
17 -0.042981 -0.672256 0.358250 -0.385808 -0.341018 -1.823744 0 0
18 0.822192 0.184879 1.658679 1.705929 3.070140 -1.218005 0 0
19 0.175070 0.195153 1.969940 0.005043 0.430538 -1.502715 0 0
20 1.339692 -1.202498 0.487937 -0.769520 -1.973308 -0.400699 0 0
21 1.290923 -0.546138 0.120024 0.429258 -0.165681 0.856938 2 0
22 1.528224 -0.912727 0.962682 -0.386673 -0.772181 -0.291766 0 0
23 -0.486779 -1.124424 0.559106 0.746533 -1.101240 1.082216 0 0
24 -0.230729 0.999926 -0.678209 -0.175670 1.412258 0.572372 1 0
25 -0.632681 0.618852 -0.778803 -0.808112 -0.442115 -0.146177 1 0
26 -1.151505 -1.127449 1.500641 -0.822825 0.158380 0.792656 0 0
27 0.265739 -3.078847 -0.939567 0.268673 -0.642098 -0.984495 0 0
28 0.623357 -1.241561 -1.149654 1.231993 2.023015 -0.070476 0 0
29 0.930863 -1.763587 -1.608926 0.462097 -0.677599 -0.693427 2 0
... ... ... ... ... ... ... ... ...
225 -1.444140 -0.088370 -0.458428 0.530251 -0.475625 -0.057486 1 1
226 -0.297006 0.887935 0.467148 2.000374 -0.396849 -0.846195 0 1
227 -1.624166 0.777486 0.635044 -1.376180 0.998008 -0.910882 1 1
228 0.230618 1.438780 0.301556 -1.353873 -0.586627 -0.102947 1 1
229 -0.163123 1.329205 0.721279 -1.383030 0.540446 -1.181571 1 1
230 -1.337576 0.249897 0.081067 0.886335 -0.078090 -0.344245 1 1
231 0.304553 0.584052 0.915910 2.455180 1.007231 0.268298 0 1
232 -0.291785 0.247731 -0.740382 0.896773 0.457951 0.390640 2 1
233 -0.532056 1.686101 0.358185 -1.561985 0.911246 0.638759 1 1
234 -1.223692 0.723005 0.599197 -0.955626 0.653814 0.112686 1 1
235 1.412552 -0.817418 0.038464 -2.397710 -2.903923 1.454325 2 1
236 0.141392 -0.756740 -1.981390 -0.636588 0.230786 0.968907 2 1
237 1.157567 -0.442417 -1.342532 -0.893118 -0.552517 -0.791388 2 1
238 -1.683225 -0.036571 0.297162 -1.488549 1.387872 -0.306946 1 1
239 -0.997159 0.655257 2.239993 -1.422875 0.373101 0.159004 1 1
240 -1.142741 0.931927 1.440876 0.665641 -0.994237 -1.093039 1 1
241 -0.151675 -0.971306 0.447819 0.895444 -0.863907 0.150120 0 1
242 -0.837654 -1.170592 0.622658 0.448216 -0.830715 -0.222067 0 1
243 -0.059101 -0.857751 0.253657 0.272951 -0.833270 0.160823 0 1
244 1.455210 -1.123798 1.124970 -1.841854 -0.183521 -0.193778 0 1
245 1.459407 -1.071308 -0.261053 -0.731205 0.603463 0.358072 2 1
246 1.850117 -1.364586 1.015519 -1.479941 -1.262489 -0.485304 0 1
247 0.468703 0.776904 -1.200084 -0.109459 0.572206 0.353229 2 1
248 0.758187 -0.030802 -1.190930 -0.092637 0.048267 2.174173 2 1
249 0.465492 -0.042081 0.541343 0.584645 0.066443 -1.886670 0 1
250 -1.114193 1.666162 0.201458 -1.543125 -0.123758 -0.430641 1 1
251 -1.675129 1.101864 0.721966 -1.964153 0.827116 0.134812 1 1
252 -1.371728 0.888874 -0.186673 -0.931346 0.795500 -1.063218 1 1
253 0.221249 0.272024 -1.593712 -0.242394 0.752955 1.102656 2 1
254 -0.747040 1.308435 0.858494 -1.950134 1.779312 -0.711789 1 1

255 rows × 8 columns

In [272]:
# Cross-tabulate chosen (0/1) against cluster and draw one stacked bar per
# cluster showing the chosen/not-chosen composition.
counts = X.groupby(['chosen', 'Cluster']).size().reset_index()
pivot_df = counts.pivot(index='Cluster', columns='chosen', values=0)
pivot_df[[0, 1]].plot.bar(stacked=True, figsize=(10, 7))
Out[272]:
<matplotlib.axes._subplots.AxesSubplot at 0x1b829fb8470>
In [273]:
# Render the current company's name as a markdown section header
# (companies[2] renders as "Gramma" per the output below).
from IPython.display import display, Markdown, Latex
display(Markdown('## '+companies[2]))

Gramma

ANN

In [167]:
# Feature matrix for company index 2 — presumably the standardized
# tonal-centroid features, matching the 6-column frames shown earlier.
X = df_n_ps_std_tc[2]
In [168]:
# Binary target column (0/1 values, as seen in the tables above).
y = df_n_ps[2]['chosen']
In [169]:
# Hold out a test split. random_state pins the split so the notebook is
# reproducible under Restart & Run All (consistent with random_state=0 used
# for KMeans elsewhere in this notebook).
# NOTE(review): the classes look imbalanced (confusion matrix above shows
# 53 vs 11 in a test fold) — consider also passing stratify=y.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
In [170]:
# Sanity check: (rows, features) of the training split.
X_train.shape
Out[170]:
(162, 6)
In [171]:
# Base estimator for the grid search; hidden_layer_sizes here is only a
# placeholder — the param_grid below overrides it.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [172]:
# Candidate hyperparameter values for the MLP grid search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
# Architectures: one to three hidden layers of varying widths.
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
# Defined but deliberately excluded from the search (commented out below).
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [173]:
import time
start = time.time()  # wall-clock start; used below to report total tuning time

np.random.seed(1234)  # seed numpy for reproducibility of the search
# Grid over activation, epoch budget, architecture and initial learning rate;
# batch_size is deliberately commented out to keep the search tractable.
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track both Cohen's kappa and accuracy; refit the winner on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): the `iid` parameter was removed in scikit-learn 0.24 —
# drop iid=True before re-running this on a modern sklearn.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [281]:
grid.fit(X_train, y_train)

# Report best hyper-parameters plus the accuracy/kappa they achieved.
# (Message text is Spanish runtime output; left unchanged.)
print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time()  # time after the grid search finished
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'relu', 'hidden_layer_sizes': (10,), 'learning_rate_init': 0.004, 'max_iter': 20}, que permiten obtener un Accuracy de 75.31% y un Kappa del 17.51
Tiempo total: 35.80 minutos
In [174]:
# Pin the winning hyper-parameters from the (35-minute) grid search so the
# notebook can be re-run without repeating the fit.
grid.best_params_ = {'activation': 'relu', 'hidden_layer_sizes': (10,), 'learning_rate_init': 0.004, 'max_iter': 20}

best = grid.best_params_
n0 = X_train.shape[1]  # input dimensionality (6 tonal-centroid features)

# Layer widths for the Keras model: tuned hidden sizes plus one output unit.
ns = list(best['hidden_layer_sizes']) + [1]
lr = best['learning_rate_init']
epochs = best['max_iter']
In [175]:
# Keras functional-API input layer matching the n0 feature columns.
input_tensor = Input(shape = (n0,))
In [176]:
# Chain the tuned hidden layers, then a single sigmoid unit for binary output.
hidden_outputs = [input_tensor]
for width in ns[:-1]:
    hidden_layer = Dense(width, activation=grid.best_params_['activation'])
    hidden_outputs.append(hidden_layer(hidden_outputs[-1]))

classification_output = Dense(ns[-1], activation='sigmoid')(hidden_outputs[-1])
In [177]:
model = Model([input_tensor], [classification_output])
# Snapshot the freshly initialised weights so training can be restarted
# from the same starting point if the fit cell is re-run.
weights = model.get_weights()
In [178]:
model.summary()
Model: "model_9"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_10 (InputLayer)        (None, 6)                 0         
_________________________________________________________________
dense_26 (Dense)             (None, 10)                70        
_________________________________________________________________
dense_27 (Dense)             (None, 1)                 11        
=================================================================
Total params: 81
Trainable params: 81
Non-trainable params: 0
_________________________________________________________________
In [179]:
model.set_weights(weights)  # restore the saved initial weights for a clean run
# NOTE(review): `lr` is a deprecated alias of `learning_rate` in newer Keras.
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Train with the tuned epoch budget; halve the learning rate whenever
# validation accuracy fails to improve by 0.01 for 10 consecutive epochs.
# NOTE(review): the test set doubles as validation data here, so the reported
# test metrics are not fully held out.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), 
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 162 samples, validate on 54 samples
Epoch 1/20
162/162 [==============================] - 0s 1ms/step - loss: 0.7228 - accuracy: 0.6049 - val_loss: 0.7926 - val_accuracy: 0.6111
Epoch 2/20
162/162 [==============================] - 0s 74us/step - loss: 0.6740 - accuracy: 0.5988 - val_loss: 0.7654 - val_accuracy: 0.5741
Epoch 3/20
162/162 [==============================] - 0s 68us/step - loss: 0.6433 - accuracy: 0.6235 - val_loss: 0.7441 - val_accuracy: 0.5556
Epoch 4/20
162/162 [==============================] - 0s 62us/step - loss: 0.6202 - accuracy: 0.6420 - val_loss: 0.7311 - val_accuracy: 0.6111
Epoch 5/20
162/162 [==============================] - 0s 74us/step - loss: 0.6033 - accuracy: 0.6667 - val_loss: 0.7230 - val_accuracy: 0.6111
Epoch 6/20
162/162 [==============================] - 0s 68us/step - loss: 0.5913 - accuracy: 0.6914 - val_loss: 0.7176 - val_accuracy: 0.6296
Epoch 7/20
162/162 [==============================] - 0s 74us/step - loss: 0.5812 - accuracy: 0.6790 - val_loss: 0.7122 - val_accuracy: 0.6296
Epoch 8/20
162/162 [==============================] - 0s 74us/step - loss: 0.5741 - accuracy: 0.7037 - val_loss: 0.7077 - val_accuracy: 0.6296
Epoch 9/20
162/162 [==============================] - 0s 68us/step - loss: 0.5698 - accuracy: 0.7037 - val_loss: 0.7029 - val_accuracy: 0.6111
Epoch 10/20
162/162 [==============================] - 0s 62us/step - loss: 0.5642 - accuracy: 0.7037 - val_loss: 0.6976 - val_accuracy: 0.6296
Epoch 11/20
162/162 [==============================] - 0s 80us/step - loss: 0.5621 - accuracy: 0.7037 - val_loss: 0.6935 - val_accuracy: 0.6296
Epoch 12/20
162/162 [==============================] - 0s 80us/step - loss: 0.5597 - accuracy: 0.7037 - val_loss: 0.6922 - val_accuracy: 0.6296
Epoch 13/20
162/162 [==============================] - 0s 74us/step - loss: 0.5576 - accuracy: 0.7037 - val_loss: 0.6911 - val_accuracy: 0.6296
Epoch 14/20
162/162 [==============================] - 0s 68us/step - loss: 0.5555 - accuracy: 0.7037 - val_loss: 0.6896 - val_accuracy: 0.6296
Epoch 15/20
162/162 [==============================] - 0s 62us/step - loss: 0.5526 - accuracy: 0.7099 - val_loss: 0.6879 - val_accuracy: 0.6296
Epoch 16/20
162/162 [==============================] - 0s 80us/step - loss: 0.5515 - accuracy: 0.7160 - val_loss: 0.6827 - val_accuracy: 0.6111

Epoch 00016: ReduceLROnPlateau reducing learning rate to 0.0020000000949949026.
Epoch 17/20
162/162 [==============================] - 0s 111us/step - loss: 0.5487 - accuracy: 0.7160 - val_loss: 0.6800 - val_accuracy: 0.6111
Epoch 18/20
162/162 [==============================] - 0s 99us/step - loss: 0.5471 - accuracy: 0.7099 - val_loss: 0.6782 - val_accuracy: 0.6111
Epoch 19/20
162/162 [==============================] - 0s 86us/step - loss: 0.5465 - accuracy: 0.7160 - val_loss: 0.6764 - val_accuracy: 0.6111
Epoch 20/20
162/162 [==============================] - 0s 80us/step - loss: 0.5455 - accuracy: 0.7160 - val_loss: 0.6744 - val_accuracy: 0.6111
In [180]:
# Plot the training history: accuracy and loss, train vs. validation.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# Use a dedicated name for the x axis: the original rebound `epochs` here,
# clobbering the epoch-count hyper-parameter defined earlier (a hidden-state
# trap if the fit cell is re-run afterwards).  The leftover debug
# `print(epochs)` has been removed.
epoch_range = range(len(acc))

plt.plot(epoch_range, acc, 'bo', label='Training acc')
plt.plot(epoch_range, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.legend()
plt.show()

plt.plot(epoch_range, loss, 'bo', label='Training loss')
plt.plot(epoch_range, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.legend()
plt.show()
range(0, 20)
In [181]:
# Held-out evaluation of the trained model.
test_loss, test_acc = model.evaluate(X_test, y_test)
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
54/54 [==============================] - 0s 74us/step
test loss: 0.6743692181728504, test accuracy: 0.6111111044883728
In [182]:
# AUC is computed on the raw sigmoid probabilities (before thresholding).
y_pred = model.predict(X_test)
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
AUC ROC:  0.5833333333333334
In [183]:
# Threshold at 0.5 for hard labels, then report Cohen's kappa and the
# confusion matrix.  NOTE(review): kappa < 0 and the [18, 0] positive row
# show the model recovers none of the 18 positive test songs.
y_pred = list(map(lambda i: int(i>=0.5), y_pred))
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
Kappa:  -0.10526315789473695
[[33  3]
 [18  0]]

KMeans

In [292]:
X
Out[292]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6
0 0.618349 0.564005 0.091611 1.885208 0.563756 1.803892
1 -0.149103 -1.303101 -0.498811 0.416525 -0.660751 -1.179350
2 -1.141294 -1.317570 0.575363 -1.560836 0.336340 1.197709
3 -0.988346 -0.540855 1.006800 -0.214650 -0.364940 0.068869
4 -0.640925 -0.228256 0.461986 -1.274446 -1.494581 -0.125833
5 0.651809 -1.155045 -1.823509 -0.572302 -0.125839 1.114927
6 0.451997 -0.464785 -1.036654 0.334704 -0.130396 0.630280
7 -0.013544 -0.797079 -0.417203 0.331839 0.387259 -0.274179
8 0.970875 1.445790 1.566960 0.586074 -0.399408 0.012962
9 0.807403 1.144660 -0.144756 0.683601 -0.910010 1.250279
10 0.694252 1.111013 -0.298027 1.482108 0.290008 2.378749
11 0.812756 1.601196 0.779411 -0.137772 -0.049154 -0.449505
12 0.108497 1.484673 -1.479839 -0.228556 -1.160260 -0.264037
13 -0.204479 1.323672 0.724265 -0.881555 0.971039 -2.057333
14 -1.530107 -0.193077 -1.866283 -1.078546 1.564324 0.948469
15 -1.432547 0.905544 -0.826578 -0.162205 0.225217 -1.043164
16 -1.690034 0.636522 -1.777337 0.332901 1.464282 -1.494455
17 1.518335 0.937667 2.799725 0.433386 0.422216 -1.047176
18 0.684979 0.531321 1.125361 -1.158059 0.870596 -1.396783
19 1.308654 0.456382 1.782806 2.045171 0.583938 0.553846
20 0.778796 -0.726044 0.453225 -0.157245 0.172772 -0.367794
21 0.504756 -0.482434 -0.040754 0.332550 0.158974 0.598871
22 0.781394 -0.663174 0.549780 -0.440383 -1.459304 -0.365324
23 0.146289 1.536025 0.055630 -0.514739 0.417498 -0.136146
24 0.355919 0.841372 0.955924 -0.037753 -0.158570 -0.320586
25 0.037665 1.886176 0.119317 0.033897 0.626855 -0.100332
26 -1.466288 0.559979 0.806653 -1.415049 1.973619 0.460540
27 -0.476036 0.236137 1.017872 1.228768 -0.501363 -0.256857
28 -0.005095 -2.023777 -0.849806 0.691870 0.593777 0.117392
29 -1.739289 0.517216 -0.791108 0.066990 1.730562 -0.382581
... ... ... ... ... ... ...
186 -0.209907 -0.890861 -1.480749 1.818309 -0.529062 -0.274082
187 0.272964 -1.573686 -1.429667 -0.280854 1.188917 0.951888
188 -0.887693 0.489524 0.498612 0.954119 -0.098669 0.105317
189 -1.002974 0.451334 0.382768 0.817178 -0.070872 -0.338093
190 -1.292658 0.953945 0.850772 0.522089 0.080980 -0.212195
191 1.459775 0.362402 1.602060 0.094609 1.169480 -0.588678
192 1.923340 0.915773 1.920236 1.121678 0.547246 0.737895
193 1.454162 0.509817 1.761284 0.561861 1.164134 -0.911890
194 1.480242 -0.198385 -0.064509 -0.994178 -1.627007 -0.646308
195 1.049366 -0.397640 0.632599 -0.627315 -1.290501 -0.733029
196 1.338278 -0.473726 0.747138 -0.872384 -1.426898 -0.885808
197 0.405835 1.250358 -0.440807 0.810338 -0.858610 1.349151
198 0.181164 0.930372 -0.282564 0.193104 -0.467723 0.358204
199 0.670208 1.198906 -0.303904 -0.039900 -0.757903 0.813867
200 -0.765252 -0.882395 0.316745 1.160239 -0.216695 -0.363017
201 -0.362519 -1.407213 -1.115582 0.682467 1.234600 0.157507
202 0.146885 -1.935391 0.147533 0.380456 2.154532 1.489885
203 0.828916 -0.454682 -0.332837 0.712552 0.806644 0.818245
204 0.763529 -1.226092 0.321224 -0.239370 -1.883123 0.931987
205 1.016504 -0.832857 -0.443373 -0.082946 0.382222 0.809814
206 0.227287 -0.956642 -0.917348 -0.459838 -0.185694 -0.045102
207 0.673549 -0.883374 -1.272948 0.092401 0.071859 -0.695644
208 0.720898 -1.292898 -0.751577 -0.256872 0.665717 0.628246
209 -0.792808 0.902811 0.937273 1.790379 0.060934 0.443300
210 -1.327416 1.434383 2.722415 1.254539 0.971727 1.122808
211 -0.784226 0.939410 1.388065 1.619603 -0.624808 0.083838
212 -1.373692 0.720443 -0.358383 0.586952 -0.315282 -0.239589
213 -1.288536 0.664754 -0.497221 0.792405 -0.824437 0.490092
214 0.335584 -0.975748 -1.771014 0.289243 2.297665 0.370714
215 0.882521 2.087302 -1.396329 -2.436266 -0.577825 -1.293233

216 rows × 6 columns

In [293]:
# Elbow-curve data: within-cluster sum of squares (inertia) for k = 1..14.
# KMeans.fit returns the estimator itself, so the chain below is equivalent
# to fitting and then reading .inertia_.
WSSs = [
    KMeans(n_clusters=k, random_state=0).fit(X).inertia_
    for k in range(1, 15)
]
WSSs
Out[293]:
[1296.0000000000002,
 1080.8577066903854,
 933.0740864307027,
 818.2780016547574,
 739.8707939120957,
 660.7782107753561,
 616.9452004899513,
 572.5504610784069,
 524.3523691729067,
 492.5848935457627,
 457.9596509280891,
 439.47748328257137,
 422.97179760296183,
 407.2321746861259]
In [294]:
# Elbow plot: look for the k where the WSS curve flattens.
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)
Out[294]:
[<matplotlib.lines.Line2D at 0x1b82cc45550>]

K=4

In [295]:
# k = 4 chosen from the elbow plot above; fixed random_state for reproducibility.
kmeans_tc = KMeans(n_clusters=4, random_state=0, n_init=10)
kmeans_tc.fit(X)
Out[295]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=4, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [296]:
# Cluster assignment (0-3) of each song used to fit the model.
kmeans_tc.labels_
Out[296]:
array([3, 1, 3, 1, 2, 3, 3, 3, 2, 3, 3, 2, 0, 0, 0, 0, 0, 2, 2, 1, 2, 3,
       2, 0, 2, 0, 0, 1, 3, 0, 3, 3, 3, 1, 0, 3, 3, 3, 1, 1, 1, 3, 3, 3,
       3, 3, 1, 2, 2, 2, 1, 1, 1, 0, 0, 0, 3, 3, 1, 3, 0, 1, 0, 3, 3, 1,
       2, 1, 1, 1, 1, 2, 2, 2, 0, 0, 3, 1, 0, 1, 3, 3, 3, 0, 1, 0, 2, 2,
       2, 3, 0, 0, 0, 3, 1, 1, 1, 1, 3, 3, 3, 2, 0, 1, 1, 1, 3, 3, 0, 3,
       0, 2, 2, 2, 3, 0, 0, 1, 0, 0, 2, 2, 0, 1, 1, 1, 1, 1, 1, 3, 3, 2,
       2, 2, 3, 0, 1, 3, 1, 1, 0, 0, 0, 1, 0, 1, 1, 2, 2, 2, 2, 2, 2, 3,
       2, 3, 2, 1, 3, 3, 0, 1, 3, 3, 3, 0, 1, 0, 2, 3, 3, 0, 0, 3, 0, 0,
       0, 2, 3, 3, 0, 1, 1, 3, 1, 0, 1, 3, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3,
       3, 3, 1, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 3, 0])
In [297]:
# predict(X) on the fitting data returns the same assignments as labels_
# (compare Out[296] and Out[297]); kept to mirror how new songs would be clustered.
clusters_tc = kmeans_tc.predict(X)
clusters_tc
Out[297]:
array([3, 1, 3, 1, 2, 3, 3, 3, 2, 3, 3, 2, 0, 0, 0, 0, 0, 2, 2, 1, 2, 3,
       2, 0, 2, 0, 0, 1, 3, 0, 3, 3, 3, 1, 0, 3, 3, 3, 1, 1, 1, 3, 3, 3,
       3, 3, 1, 2, 2, 2, 1, 1, 1, 0, 0, 0, 3, 3, 1, 3, 0, 1, 0, 3, 3, 1,
       2, 1, 1, 1, 1, 2, 2, 2, 0, 0, 3, 1, 0, 1, 3, 3, 3, 0, 1, 0, 2, 2,
       2, 3, 0, 0, 0, 3, 1, 1, 1, 1, 3, 3, 3, 2, 0, 1, 1, 1, 3, 3, 0, 3,
       0, 2, 2, 2, 3, 0, 0, 1, 0, 0, 2, 2, 0, 1, 1, 1, 1, 1, 1, 3, 3, 2,
       2, 2, 3, 0, 1, 3, 1, 1, 0, 0, 0, 1, 0, 1, 1, 2, 2, 2, 2, 2, 2, 3,
       2, 3, 2, 1, 3, 3, 0, 1, 3, 3, 3, 0, 1, 0, 2, 3, 3, 0, 0, 3, 0, 0,
       0, 2, 3, 3, 0, 1, 1, 3, 1, 0, 1, 3, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3,
       3, 3, 1, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 3, 0])
In [298]:
# Attach the cluster id and the target to the feature frame for plotting.
# NOTE(review): this mutates the standardised feature table in place — any
# later re-run of the modelling cells on this X would pick up these two extra
# columns; assigning to a copy would be safer.
X.loc[:,'Cluster'] = clusters_tc
# list(y) drops y's index, so values align with X's rows positionally.
X.loc[:,'chosen'] = list(y)
In [299]:
X
Out[299]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6 Cluster chosen
0 0.618349 0.564005 0.091611 1.885208 0.563756 1.803892 3 0
1 -0.149103 -1.303101 -0.498811 0.416525 -0.660751 -1.179350 1 0
2 -1.141294 -1.317570 0.575363 -1.560836 0.336340 1.197709 3 0
3 -0.988346 -0.540855 1.006800 -0.214650 -0.364940 0.068869 1 0
4 -0.640925 -0.228256 0.461986 -1.274446 -1.494581 -0.125833 2 0
5 0.651809 -1.155045 -1.823509 -0.572302 -0.125839 1.114927 3 0
6 0.451997 -0.464785 -1.036654 0.334704 -0.130396 0.630280 3 0
7 -0.013544 -0.797079 -0.417203 0.331839 0.387259 -0.274179 3 0
8 0.970875 1.445790 1.566960 0.586074 -0.399408 0.012962 2 0
9 0.807403 1.144660 -0.144756 0.683601 -0.910010 1.250279 3 0
10 0.694252 1.111013 -0.298027 1.482108 0.290008 2.378749 3 0
11 0.812756 1.601196 0.779411 -0.137772 -0.049154 -0.449505 2 0
12 0.108497 1.484673 -1.479839 -0.228556 -1.160260 -0.264037 0 0
13 -0.204479 1.323672 0.724265 -0.881555 0.971039 -2.057333 0 0
14 -1.530107 -0.193077 -1.866283 -1.078546 1.564324 0.948469 0 0
15 -1.432547 0.905544 -0.826578 -0.162205 0.225217 -1.043164 0 0
16 -1.690034 0.636522 -1.777337 0.332901 1.464282 -1.494455 0 0
17 1.518335 0.937667 2.799725 0.433386 0.422216 -1.047176 2 0
18 0.684979 0.531321 1.125361 -1.158059 0.870596 -1.396783 2 0
19 1.308654 0.456382 1.782806 2.045171 0.583938 0.553846 1 0
20 0.778796 -0.726044 0.453225 -0.157245 0.172772 -0.367794 2 0
21 0.504756 -0.482434 -0.040754 0.332550 0.158974 0.598871 3 0
22 0.781394 -0.663174 0.549780 -0.440383 -1.459304 -0.365324 2 0
23 0.146289 1.536025 0.055630 -0.514739 0.417498 -0.136146 0 0
24 0.355919 0.841372 0.955924 -0.037753 -0.158570 -0.320586 2 0
25 0.037665 1.886176 0.119317 0.033897 0.626855 -0.100332 0 0
26 -1.466288 0.559979 0.806653 -1.415049 1.973619 0.460540 0 0
27 -0.476036 0.236137 1.017872 1.228768 -0.501363 -0.256857 1 0
28 -0.005095 -2.023777 -0.849806 0.691870 0.593777 0.117392 3 0
29 -1.739289 0.517216 -0.791108 0.066990 1.730562 -0.382581 0 0
... ... ... ... ... ... ... ... ...
186 -0.209907 -0.890861 -1.480749 1.818309 -0.529062 -0.274082 1 1
187 0.272964 -1.573686 -1.429667 -0.280854 1.188917 0.951888 3 1
188 -0.887693 0.489524 0.498612 0.954119 -0.098669 0.105317 1 1
189 -1.002974 0.451334 0.382768 0.817178 -0.070872 -0.338093 1 1
190 -1.292658 0.953945 0.850772 0.522089 0.080980 -0.212195 1 1
191 1.459775 0.362402 1.602060 0.094609 1.169480 -0.588678 2 1
192 1.923340 0.915773 1.920236 1.121678 0.547246 0.737895 2 1
193 1.454162 0.509817 1.761284 0.561861 1.164134 -0.911890 2 1
194 1.480242 -0.198385 -0.064509 -0.994178 -1.627007 -0.646308 2 1
195 1.049366 -0.397640 0.632599 -0.627315 -1.290501 -0.733029 2 1
196 1.338278 -0.473726 0.747138 -0.872384 -1.426898 -0.885808 2 1
197 0.405835 1.250358 -0.440807 0.810338 -0.858610 1.349151 3 1
198 0.181164 0.930372 -0.282564 0.193104 -0.467723 0.358204 3 1
199 0.670208 1.198906 -0.303904 -0.039900 -0.757903 0.813867 3 1
200 -0.765252 -0.882395 0.316745 1.160239 -0.216695 -0.363017 1 1
201 -0.362519 -1.407213 -1.115582 0.682467 1.234600 0.157507 3 1
202 0.146885 -1.935391 0.147533 0.380456 2.154532 1.489885 3 1
203 0.828916 -0.454682 -0.332837 0.712552 0.806644 0.818245 3 1
204 0.763529 -1.226092 0.321224 -0.239370 -1.883123 0.931987 3 1
205 1.016504 -0.832857 -0.443373 -0.082946 0.382222 0.809814 3 1
206 0.227287 -0.956642 -0.917348 -0.459838 -0.185694 -0.045102 3 1
207 0.673549 -0.883374 -1.272948 0.092401 0.071859 -0.695644 3 1
208 0.720898 -1.292898 -0.751577 -0.256872 0.665717 0.628246 3 1
209 -0.792808 0.902811 0.937273 1.790379 0.060934 0.443300 1 1
210 -1.327416 1.434383 2.722415 1.254539 0.971727 1.122808 1 1
211 -0.784226 0.939410 1.388065 1.619603 -0.624808 0.083838 1 1
212 -1.373692 0.720443 -0.358383 0.586952 -0.315282 -0.239589 1 1
213 -1.288536 0.664754 -0.497221 0.792405 -0.824437 0.490092 1 1
214 0.335584 -0.975748 -1.771014 0.289243 2.297665 0.370714 3 1
215 0.882521 2.087302 -1.396329 -2.436266 -0.577825 -1.293233 0 1

216 rows × 8 columns

In [300]:
# Count songs per (chosen, Cluster) pair and draw a stacked bar chart:
# one bar per cluster, split by whether the song was chosen (0/1).
cluster_counts = X.groupby(['chosen', 'Cluster']).size().reset_index()
counts_by_cluster = cluster_counts.pivot(index='Cluster', columns='chosen', values=0)
counts_by_cluster.loc[:, [0, 1]].plot.bar(stacked=True, figsize=(10, 7))
Out[300]:
<matplotlib.axes._subplots.AxesSubplot at 0x1b82ccc8160>
In [301]:
from IPython.display import display, Markdown, Latex
# Render the company name as a level-2 markdown heading to label this section.
display(Markdown('## '+companies[3]))

Hotel Marrakech

ANN

In [200]:
# Standardised tonal-centroid features for company index 3 ("Hotel Marrakech", per the heading above).
X = df_n_ps_std_tc[3]
In [201]:
# Binary target: 1 if the song was chosen for the playlist, 0 otherwise.
y = df_n_ps[3]['chosen']
In [202]:
# Default split (75% train / 25% test, shuffled).
# NOTE(review): no random_state, so the split — and every result below —
# changes on each re-run; consider fixing it for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [203]:
X_train.shape
Out[203]:
(108, 6)
In [204]:
# Baseline sklearn MLP; the (30,30,30) architecture is just a placeholder —
# the grid search below overrides hidden_layer_sizes anyway.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [205]:
# Hyper-parameter search space for the MLP grid search below.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
# One to three hidden layers, 10-30 units each.
hidden_layer_sizes_vec = [
    (10,), (20,), (30,),
    (10, 10), (20, 20), (30, 30), (20, 10),
    (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10),
]
# 0.001 .. 0.009 in steps of 0.001, plus 0.01 and 0.02.
learning_rate_init_vec = [i / 1000 for i in range(1, 10)] + [0.01, 0.02]
# Defined but left out of the grid (commented in the param dict below).
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [206]:
import time
start = time.time()  # wall-clock start; used below to report total tuning time

np.random.seed(1234)  # seed numpy for reproducibility of the search
# Grid over activation, epoch budget, architecture and initial learning rate;
# batch_size is deliberately commented out to keep the search tractable.
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track both Cohen's kappa and accuracy; refit the winner on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): the `iid` parameter was removed in scikit-learn 0.24 —
# drop iid=True before re-running this on a modern sklearn.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [309]:
grid.fit(X_train, y_train)

# Report best hyper-parameters plus the accuracy/kappa they achieved.
# (Message text is Spanish runtime output; left unchanged.)
print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time()  # time after the grid search finished
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'tanh', 'hidden_layer_sizes': (20, 20), 'learning_rate_init': 0.003, 'max_iter': 2000}, que permiten obtener un Accuracy de 79.63% y un Kappa del 57.62
Tiempo total: 20.07 minutos
In [207]:
# Pin the winning hyper-parameters from the (20-minute) grid search so the
# notebook can be re-run without repeating the fit.
grid.best_params_ = {'activation': 'tanh', 'hidden_layer_sizes': (20, 20), 'learning_rate_init': 0.003, 'max_iter': 2000}

best = grid.best_params_
n0 = X_train.shape[1]  # input dimensionality (6 tonal-centroid features)

# Layer widths for the Keras model: tuned hidden sizes plus one output unit.
ns = list(best['hidden_layer_sizes']) + [1]
lr = best['learning_rate_init']
epochs = best['max_iter']
In [208]:
# Keras functional-API input layer matching the n0 feature columns.
input_tensor = Input(shape = (n0,))
In [209]:
# Chain the tuned hidden layers, then a single sigmoid unit for binary output.
hidden_outputs = [input_tensor]
for width in ns[:-1]:
    hidden_layer = Dense(width, activation=grid.best_params_['activation'])
    hidden_outputs.append(hidden_layer(hidden_outputs[-1]))

classification_output = Dense(ns[-1], activation='sigmoid')(hidden_outputs[-1])
In [210]:
model = Model([input_tensor], [classification_output])
# Snapshot the freshly initialised weights so training can be restarted
# from the same starting point if the fit cell is re-run.
weights = model.get_weights()
In [211]:
model.summary()
Model: "model_10"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_11 (InputLayer)        (None, 6)                 0         
_________________________________________________________________
dense_28 (Dense)             (None, 20)                140       
_________________________________________________________________
dense_29 (Dense)             (None, 20)                420       
_________________________________________________________________
dense_30 (Dense)             (None, 1)                 21        
=================================================================
Total params: 581
Trainable params: 581
Non-trainable params: 0
_________________________________________________________________
In [212]:
model.set_weights(weights)  # restore the saved initial weights for a clean run
# NOTE(review): `lr` is a deprecated alias of `learning_rate` in newer Keras.
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Train with the tuned epoch budget; halve the learning rate whenever
# validation accuracy fails to improve by 0.01 for 10 consecutive epochs.
# NOTE(review): the test set doubles as validation data here, so the reported
# test metrics are not fully held out.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), 
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 108 samples, validate on 36 samples
Epoch 1/2000
108/108 [==============================] - 0s 2ms/step - loss: 0.6846 - accuracy: 0.5741 - val_loss: 0.7108 - val_accuracy: 0.5833
Epoch 2/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6691 - accuracy: 0.6204 - val_loss: 0.7129 - val_accuracy: 0.5000
Epoch 3/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6606 - accuracy: 0.6481 - val_loss: 0.7103 - val_accuracy: 0.5278
Epoch 4/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6552 - accuracy: 0.6574 - val_loss: 0.7159 - val_accuracy: 0.5000
Epoch 5/2000
108/108 [==============================] - 0s 83us/step - loss: 0.6532 - accuracy: 0.6481 - val_loss: 0.7112 - val_accuracy: 0.5278
Epoch 6/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6503 - accuracy: 0.6389 - val_loss: 0.7059 - val_accuracy: 0.5556
Epoch 7/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6476 - accuracy: 0.6389 - val_loss: 0.6971 - val_accuracy: 0.5833
Epoch 8/2000
108/108 [==============================] - 0s 83us/step - loss: 0.6460 - accuracy: 0.6667 - val_loss: 0.6922 - val_accuracy: 0.5833
Epoch 9/2000
108/108 [==============================] - 0s 83us/step - loss: 0.6462 - accuracy: 0.6481 - val_loss: 0.6955 - val_accuracy: 0.5833
Epoch 10/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6435 - accuracy: 0.6574 - val_loss: 0.6958 - val_accuracy: 0.6111
Epoch 11/2000
108/108 [==============================] - 0s 83us/step - loss: 0.6416 - accuracy: 0.6759 - val_loss: 0.6984 - val_accuracy: 0.6111
Epoch 12/2000
108/108 [==============================] - 0s 83us/step - loss: 0.6403 - accuracy: 0.6667 - val_loss: 0.6955 - val_accuracy: 0.5833
Epoch 13/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6377 - accuracy: 0.6667 - val_loss: 0.6941 - val_accuracy: 0.5833
Epoch 14/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6366 - accuracy: 0.6667 - val_loss: 0.6967 - val_accuracy: 0.5278
Epoch 15/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6362 - accuracy: 0.6574 - val_loss: 0.6968 - val_accuracy: 0.5278
Epoch 16/2000
108/108 [==============================] - 0s 74us/step - loss: 0.6350 - accuracy: 0.6574 - val_loss: 0.7024 - val_accuracy: 0.5278
Epoch 17/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6328 - accuracy: 0.6667 - val_loss: 0.7004 - val_accuracy: 0.5000
Epoch 18/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6312 - accuracy: 0.6667 - val_loss: 0.6960 - val_accuracy: 0.5000
Epoch 19/2000
108/108 [==============================] - 0s 83us/step - loss: 0.6297 - accuracy: 0.6574 - val_loss: 0.6974 - val_accuracy: 0.5000
Epoch 20/2000
108/108 [==============================] - 0s 83us/step - loss: 0.6269 - accuracy: 0.6481 - val_loss: 0.6949 - val_accuracy: 0.5278

Epoch 00020: ReduceLROnPlateau reducing learning rate to 0.001500000013038516.
Epoch 21/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6256 - accuracy: 0.6574 - val_loss: 0.6913 - val_accuracy: 0.5278
Epoch 22/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6245 - accuracy: 0.6574 - val_loss: 0.6865 - val_accuracy: 0.5278
Epoch 23/2000
108/108 [==============================] - 0s 74us/step - loss: 0.6237 - accuracy: 0.6574 - val_loss: 0.6851 - val_accuracy: 0.5556
Epoch 24/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6229 - accuracy: 0.6481 - val_loss: 0.6853 - val_accuracy: 0.5556
Epoch 25/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6223 - accuracy: 0.6574 - val_loss: 0.6850 - val_accuracy: 0.5556
Epoch 26/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6208 - accuracy: 0.6574 - val_loss: 0.6852 - val_accuracy: 0.5556
Epoch 27/2000
108/108 [==============================] - 0s 74us/step - loss: 0.6202 - accuracy: 0.6574 - val_loss: 0.6840 - val_accuracy: 0.5556
Epoch 28/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6190 - accuracy: 0.6574 - val_loss: 0.6850 - val_accuracy: 0.5278
Epoch 29/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6182 - accuracy: 0.6667 - val_loss: 0.6840 - val_accuracy: 0.5278
Epoch 30/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6169 - accuracy: 0.6667 - val_loss: 0.6847 - val_accuracy: 0.5278

Epoch 00030: ReduceLROnPlateau reducing learning rate to 0.000750000006519258.
Epoch 31/2000
108/108 [==============================] - 0s 83us/step - loss: 0.6157 - accuracy: 0.6667 - val_loss: 0.6870 - val_accuracy: 0.5278
Epoch 32/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6152 - accuracy: 0.6759 - val_loss: 0.6882 - val_accuracy: 0.5278
Epoch 33/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6148 - accuracy: 0.6759 - val_loss: 0.6879 - val_accuracy: 0.5278
Epoch 34/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6141 - accuracy: 0.6759 - val_loss: 0.6886 - val_accuracy: 0.5278
Epoch 35/2000
108/108 [==============================] - 0s 83us/step - loss: 0.6135 - accuracy: 0.6759 - val_loss: 0.6892 - val_accuracy: 0.5278
Epoch 36/2000
108/108 [==============================] - 0s 83us/step - loss: 0.6131 - accuracy: 0.6759 - val_loss: 0.6894 - val_accuracy: 0.5000
Epoch 37/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6125 - accuracy: 0.6759 - val_loss: 0.6904 - val_accuracy: 0.5000
Epoch 38/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6119 - accuracy: 0.6759 - val_loss: 0.6905 - val_accuracy: 0.5000
Epoch 39/2000
108/108 [==============================] - 0s 74us/step - loss: 0.6113 - accuracy: 0.6759 - val_loss: 0.6909 - val_accuracy: 0.5000
Epoch 40/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6107 - accuracy: 0.6759 - val_loss: 0.6911 - val_accuracy: 0.5000

Epoch 00040: ReduceLROnPlateau reducing learning rate to 0.000375000003259629.
Epoch 41/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6102 - accuracy: 0.6759 - val_loss: 0.6915 - val_accuracy: 0.5000
Epoch 42/2000
108/108 [==============================] - 0s 74us/step - loss: 0.6099 - accuracy: 0.6759 - val_loss: 0.6919 - val_accuracy: 0.5000
Epoch 43/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6096 - accuracy: 0.6759 - val_loss: 0.6924 - val_accuracy: 0.5000
Epoch 44/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6095 - accuracy: 0.6759 - val_loss: 0.6931 - val_accuracy: 0.5000
Epoch 45/2000
108/108 [==============================] - 0s 74us/step - loss: 0.6091 - accuracy: 0.6759 - val_loss: 0.6933 - val_accuracy: 0.5000
Epoch 46/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6088 - accuracy: 0.6759 - val_loss: 0.6934 - val_accuracy: 0.5000
Epoch 47/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6086 - accuracy: 0.6759 - val_loss: 0.6933 - val_accuracy: 0.5000
Epoch 48/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6083 - accuracy: 0.6759 - val_loss: 0.6934 - val_accuracy: 0.5000
Epoch 49/2000
108/108 [==============================] - 0s 83us/step - loss: 0.6081 - accuracy: 0.6759 - val_loss: 0.6942 - val_accuracy: 0.5000
Epoch 50/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6077 - accuracy: 0.6852 - val_loss: 0.6948 - val_accuracy: 0.5000

Epoch 00050: ReduceLROnPlateau reducing learning rate to 0.0001875000016298145.
Epoch 51/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6074 - accuracy: 0.6852 - val_loss: 0.6946 - val_accuracy: 0.5000
Epoch 52/2000
108/108 [==============================] - 0s 83us/step - loss: 0.6073 - accuracy: 0.6759 - val_loss: 0.6943 - val_accuracy: 0.5000
Epoch 53/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6072 - accuracy: 0.6759 - val_loss: 0.6941 - val_accuracy: 0.5000
Epoch 54/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6071 - accuracy: 0.6759 - val_loss: 0.6941 - val_accuracy: 0.5000
Epoch 55/2000
108/108 [==============================] - 0s 83us/step - loss: 0.6070 - accuracy: 0.6759 - val_loss: 0.6941 - val_accuracy: 0.5000
Epoch 56/2000
108/108 [==============================] - 0s 74us/step - loss: 0.6068 - accuracy: 0.6759 - val_loss: 0.6939 - val_accuracy: 0.5000
Epoch 57/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6066 - accuracy: 0.6759 - val_loss: 0.6938 - val_accuracy: 0.5000
Epoch 58/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6065 - accuracy: 0.6852 - val_loss: 0.6938 - val_accuracy: 0.5000
Epoch 59/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6063 - accuracy: 0.6852 - val_loss: 0.6936 - val_accuracy: 0.5000
Epoch 60/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6061 - accuracy: 0.6852 - val_loss: 0.6933 - val_accuracy: 0.5000

Epoch 00060: ReduceLROnPlateau reducing learning rate to 9.375000081490725e-05.
Epoch 61/2000
108/108 [==============================] - 0s 83us/step - loss: 0.6061 - accuracy: 0.6852 - val_loss: 0.6932 - val_accuracy: 0.5000
Epoch 62/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6060 - accuracy: 0.6944 - val_loss: 0.6931 - val_accuracy: 0.5000
Epoch 63/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6059 - accuracy: 0.6944 - val_loss: 0.6930 - val_accuracy: 0.5000
Epoch 64/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6058 - accuracy: 0.6944 - val_loss: 0.6930 - val_accuracy: 0.5000
Epoch 65/2000
108/108 [==============================] - 0s 74us/step - loss: 0.6057 - accuracy: 0.6944 - val_loss: 0.6929 - val_accuracy: 0.5000
Epoch 66/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6056 - accuracy: 0.6944 - val_loss: 0.6928 - val_accuracy: 0.5000
Epoch 67/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6056 - accuracy: 0.6944 - val_loss: 0.6925 - val_accuracy: 0.5000
Epoch 68/2000
108/108 [==============================] - 0s 83us/step - loss: 0.6055 - accuracy: 0.6944 - val_loss: 0.6924 - val_accuracy: 0.5000
Epoch 69/2000
108/108 [==============================] - 0s 83us/step - loss: 0.6054 - accuracy: 0.6944 - val_loss: 0.6925 - val_accuracy: 0.5000
Epoch 70/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6053 - accuracy: 0.6944 - val_loss: 0.6924 - val_accuracy: 0.5000

Epoch 00070: ReduceLROnPlateau reducing learning rate to 4.6875000407453626e-05.
Epoch 71/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6053 - accuracy: 0.6944 - val_loss: 0.6925 - val_accuracy: 0.5000
Epoch 72/2000
108/108 [==============================] - 0s 83us/step - loss: 0.6052 - accuracy: 0.6944 - val_loss: 0.6925 - val_accuracy: 0.5000
Epoch 73/2000
108/108 [==============================] - 0s 74us/step - loss: 0.6052 - accuracy: 0.6944 - val_loss: 0.6924 - val_accuracy: 0.5000
Epoch 74/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6051 - accuracy: 0.6944 - val_loss: 0.6923 - val_accuracy: 0.5000
Epoch 75/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6051 - accuracy: 0.6944 - val_loss: 0.6922 - val_accuracy: 0.5000
Epoch 76/2000
108/108 [==============================] - 0s 74us/step - loss: 0.6051 - accuracy: 0.6944 - val_loss: 0.6922 - val_accuracy: 0.5000
Epoch 77/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6050 - accuracy: 0.6944 - val_loss: 0.6922 - val_accuracy: 0.5000
Epoch 78/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6050 - accuracy: 0.6944 - val_loss: 0.6921 - val_accuracy: 0.5000
Epoch 79/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6050 - accuracy: 0.6944 - val_loss: 0.6921 - val_accuracy: 0.5000
Epoch 80/2000
108/108 [==============================] - 0s 74us/step - loss: 0.6049 - accuracy: 0.6944 - val_loss: 0.6922 - val_accuracy: 0.5000

Epoch 00080: ReduceLROnPlateau reducing learning rate to 2.3437500203726813e-05.
Epoch 81/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6049 - accuracy: 0.6944 - val_loss: 0.6922 - val_accuracy: 0.5000
Epoch 82/2000
108/108 [==============================] - 0s 83us/step - loss: 0.6049 - accuracy: 0.6944 - val_loss: 0.6922 - val_accuracy: 0.5000
Epoch 83/2000
108/108 [==============================] - 0s 74us/step - loss: 0.6048 - accuracy: 0.6944 - val_loss: 0.6921 - val_accuracy: 0.5000
Epoch 84/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6048 - accuracy: 0.6944 - val_loss: 0.6921 - val_accuracy: 0.5000
Epoch 85/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6048 - accuracy: 0.6944 - val_loss: 0.6921 - val_accuracy: 0.5000
Epoch 86/2000
108/108 [==============================] - 0s 74us/step - loss: 0.6048 - accuracy: 0.6944 - val_loss: 0.6921 - val_accuracy: 0.5000
Epoch 87/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6048 - accuracy: 0.6944 - val_loss: 0.6921 - val_accuracy: 0.5000
Epoch 88/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6047 - accuracy: 0.6944 - val_loss: 0.6921 - val_accuracy: 0.5000
Epoch 89/2000
108/108 [==============================] - 0s 83us/step - loss: 0.6047 - accuracy: 0.6944 - val_loss: 0.6921 - val_accuracy: 0.5000
Epoch 90/2000
108/108 [==============================] - 0s 74us/step - loss: 0.6047 - accuracy: 0.6944 - val_loss: 0.6921 - val_accuracy: 0.5000

Epoch 00090: ReduceLROnPlateau reducing learning rate to 1.1718750101863407e-05.
Epoch 91/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6047 - accuracy: 0.6944 - val_loss: 0.6921 - val_accuracy: 0.5000
Epoch 92/2000
108/108 [==============================] - 0s 83us/step - loss: 0.6047 - accuracy: 0.6944 - val_loss: 0.6921 - val_accuracy: 0.5000
Epoch 93/2000
108/108 [==============================] - 0s 83us/step - loss: 0.6047 - accuracy: 0.6944 - val_loss: 0.6921 - val_accuracy: 0.5000
Epoch 94/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6046 - accuracy: 0.6944 - val_loss: 0.6921 - val_accuracy: 0.5000
Epoch 95/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6046 - accuracy: 0.6944 - val_loss: 0.6921 - val_accuracy: 0.5000
Epoch 96/2000
108/108 [==============================] - 0s 83us/step - loss: 0.6046 - accuracy: 0.6944 - val_loss: 0.6921 - val_accuracy: 0.5000
Epoch 97/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6046 - accuracy: 0.6944 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 98/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6046 - accuracy: 0.6944 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 99/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6046 - accuracy: 0.6944 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 100/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6046 - accuracy: 0.6944 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00100: ReduceLROnPlateau reducing learning rate to 5.859375050931703e-06.
Epoch 101/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6046 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 102/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6046 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 103/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6046 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 104/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6046 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 105/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6046 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 106/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6046 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 107/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6046 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 108/2000
108/108 [==============================] - 0s 176us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 109/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 110/2000
108/108 [==============================] - 0s 83us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00110: ReduceLROnPlateau reducing learning rate to 2.9296875254658516e-06.
Epoch 111/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 112/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 113/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 114/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 115/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 116/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 117/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 118/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 119/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 120/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00120: ReduceLROnPlateau reducing learning rate to 1.4648437627329258e-06.
Epoch 121/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 122/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 123/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 124/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 125/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 126/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 127/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 128/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 129/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 130/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00130: ReduceLROnPlateau reducing learning rate to 7.324218813664629e-07.
Epoch 131/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 132/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 133/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 134/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 135/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 136/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 137/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 138/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 139/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 140/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00140: ReduceLROnPlateau reducing learning rate to 3.6621094068323146e-07.
Epoch 141/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 142/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 143/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 144/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 145/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 146/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 147/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 148/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 149/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 150/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00150: ReduceLROnPlateau reducing learning rate to 1.8310547034161573e-07.
Epoch 151/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 152/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 153/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 154/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 155/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 156/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 157/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 158/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 159/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 160/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00160: ReduceLROnPlateau reducing learning rate to 9.155273517080786e-08.
Epoch 161/2000
108/108 [==============================] - 0s 222us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 162/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 163/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 164/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 165/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 166/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 167/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 168/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 169/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 170/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00170: ReduceLROnPlateau reducing learning rate to 4.577636758540393e-08.
Epoch 171/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 172/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 173/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 174/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 175/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 176/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 177/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 178/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 179/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 180/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00180: ReduceLROnPlateau reducing learning rate to 2.2888183792701966e-08.
Epoch 181/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 182/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 183/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 184/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 185/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 186/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 187/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 188/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 189/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 190/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00190: ReduceLROnPlateau reducing learning rate to 1.1444091896350983e-08.
Epoch 191/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 192/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 193/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 194/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 195/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 196/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 197/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 198/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 199/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 200/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00200: ReduceLROnPlateau reducing learning rate to 5.7220459481754915e-09.
Epoch 201/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 202/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 203/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 204/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 205/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 206/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 207/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 208/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 209/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 210/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00210: ReduceLROnPlateau reducing learning rate to 2.8610229740877458e-09.
Epoch 211/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 212/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 213/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 214/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 215/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 216/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 217/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 218/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 219/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 220/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00220: ReduceLROnPlateau reducing learning rate to 1.4305114870438729e-09.
Epoch 221/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 222/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 223/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 224/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 225/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 226/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 227/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 228/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 229/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 230/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00230: ReduceLROnPlateau reducing learning rate to 7.152557435219364e-10.
Epoch 231/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 232/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 233/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 234/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 235/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 236/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 237/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 238/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 239/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 240/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00240: ReduceLROnPlateau reducing learning rate to 3.576278717609682e-10.
Epoch 241/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 242/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 243/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 244/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 245/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 246/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 247/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 248/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 249/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 250/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00250: ReduceLROnPlateau reducing learning rate to 1.788139358804841e-10.
Epoch 251/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 252/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 253/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 254/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 255/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 256/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 257/2000
108/108 [==============================] - 0s 176us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 258/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 259/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 260/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00260: ReduceLROnPlateau reducing learning rate to 8.940696794024205e-11.
Epoch 261/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 262/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 263/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 264/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 265/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 266/2000
108/108 [==============================] - 0s 176us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 267/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 268/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 269/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 270/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00270: ReduceLROnPlateau reducing learning rate to 4.470348397012103e-11.
Epoch 271/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 272/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 273/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 274/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 275/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 276/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 277/2000
108/108 [==============================] - 0s 176us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 278/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 279/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 280/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00280: ReduceLROnPlateau reducing learning rate to 2.2351741985060514e-11.
Epoch 281/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 282/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 283/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 284/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 285/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 286/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 287/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 288/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 289/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 290/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00290: ReduceLROnPlateau reducing learning rate to 1.1175870992530257e-11.
Epoch 291/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 292/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 293/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 294/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 295/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 296/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 297/2000
108/108 [==============================] - 0s 194us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 298/2000
108/108 [==============================] - ETA: 0s - loss: 0.5656 - accuracy: 0.75 - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 299/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 300/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00300: ReduceLROnPlateau reducing learning rate to 5.5879354962651284e-12.
Epoch 301/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 302/2000
108/108 [==============================] - 0s 204us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 303/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 304/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 305/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 306/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 307/2000
108/108 [==============================] - 0s 176us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 308/2000
108/108 [==============================] - 0s 194us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 309/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 310/2000
108/108 [==============================] - 0s 176us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00310: ReduceLROnPlateau reducing learning rate to 2.7939677481325642e-12.
Epoch 311/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 312/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 313/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 314/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 315/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 316/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 317/2000
108/108 [==============================] - 0s 176us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 318/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 319/2000
108/108 [==============================] - 0s 370us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 320/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00320: ReduceLROnPlateau reducing learning rate to 1.3969838740662821e-12.
Epoch 321/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 322/2000
108/108 [==============================] - 0s 287us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 323/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 324/2000
108/108 [==============================] - 0s 268us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 325/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 326/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 327/2000
108/108 [==============================] - 0s 231us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 328/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 329/2000
108/108 [==============================] - 0s 231us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 330/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00330: ReduceLROnPlateau reducing learning rate to 6.984919370331411e-13.
Epoch 331/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 332/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 333/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 334/2000
108/108 [==============================] - 0s 222us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 335/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 336/2000
108/108 [==============================] - 0s 213us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 337/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 338/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 339/2000
108/108 [==============================] - 0s 287us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 340/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00340: ReduceLROnPlateau reducing learning rate to 3.4924596851657053e-13.
Epoch 341/2000
108/108 [==============================] - 0s 231us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 342/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 343/2000
108/108 [==============================] - 0s 315us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 344/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 345/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 346/2000
108/108 [==============================] - 0s 176us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 347/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 348/2000
108/108 [==============================] - 0s 204us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 349/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 350/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00350: ReduceLROnPlateau reducing learning rate to 1.7462298425828526e-13.
Epoch 351/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 352/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 353/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 354/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 355/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 356/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 357/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 358/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 359/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 360/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00360: ReduceLROnPlateau reducing learning rate to 8.731149212914263e-14.
Epoch 361/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 362/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 363/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 364/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 365/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 366/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 367/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 368/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 369/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 370/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00370: ReduceLROnPlateau reducing learning rate to 4.3655746064571316e-14.
Epoch 371/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 372/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 373/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 374/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 375/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 376/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 377/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 378/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 379/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 380/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00380: ReduceLROnPlateau reducing learning rate to 2.1827873032285658e-14.
Epoch 381/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 382/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 383/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 384/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 385/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 386/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 387/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 388/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 389/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 390/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00390: ReduceLROnPlateau reducing learning rate to 1.0913936516142829e-14.
Epoch 391/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 392/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 393/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 394/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 395/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 396/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 397/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 398/2000
108/108 [==============================] - 0s 185us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 399/2000
108/108 [==============================] - 0s 176us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 400/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00400: ReduceLROnPlateau reducing learning rate to 5.4569682580714145e-15.
Epoch 401/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 402/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 403/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 404/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 405/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 406/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 407/2000
108/108 [==============================] - 0s 185us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 408/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 409/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 410/2000
108/108 [==============================] - 0s 204us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00410: ReduceLROnPlateau reducing learning rate to 2.7284841290357072e-15.
Epoch 411/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 412/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 413/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 414/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 415/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 416/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 417/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 418/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 419/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 420/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00420: ReduceLROnPlateau reducing learning rate to 1.3642420645178536e-15.
Epoch 421/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 422/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 423/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 424/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 425/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 426/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 427/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 428/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 429/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 430/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00430: ReduceLROnPlateau reducing learning rate to 6.821210322589268e-16.
Epoch 431/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 432/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 433/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 434/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 435/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 436/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 437/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 438/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 439/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 440/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00440: ReduceLROnPlateau reducing learning rate to 3.410605161294634e-16.
Epoch 441/2000
108/108 [==============================] - 0s 176us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 442/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 443/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 444/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 445/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 446/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 447/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 448/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 449/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 450/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00450: ReduceLROnPlateau reducing learning rate to 1.705302580647317e-16.
Epoch 451/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 452/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 453/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 454/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 455/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 456/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 457/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 458/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 459/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 460/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00460: ReduceLROnPlateau reducing learning rate to 8.526512903236585e-17.
Epoch 461/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 462/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 463/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 464/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 465/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 466/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 467/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 468/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 469/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 470/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00470: ReduceLROnPlateau reducing learning rate to 4.2632564516182926e-17.
Epoch 471/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 472/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 473/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 474/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 475/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 476/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 477/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 478/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 479/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 480/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00480: ReduceLROnPlateau reducing learning rate to 2.1316282258091463e-17.
Epoch 481/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 482/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 483/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 484/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 485/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 486/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 487/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 488/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 489/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 490/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00490: ReduceLROnPlateau reducing learning rate to 1.0658141129045731e-17.
Epoch 491/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 492/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 493/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 494/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 495/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 496/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 497/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 498/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 499/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 500/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00500: ReduceLROnPlateau reducing learning rate to 5.329070564522866e-18.
Epoch 501/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 502/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 503/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 504/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 505/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 506/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 507/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 508/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 509/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 510/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00510: ReduceLROnPlateau reducing learning rate to 2.664535282261433e-18.
Epoch 511/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 512/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 513/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 514/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 515/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 516/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 517/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 518/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 519/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 520/2000
108/108 [==============================] - ETA: 0s - loss: 0.6291 - accuracy: 0.68 - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00520: ReduceLROnPlateau reducing learning rate to 1.3322676411307164e-18.
Epoch 521/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 522/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 523/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 524/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 525/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 526/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 527/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 528/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 529/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 530/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00530: ReduceLROnPlateau reducing learning rate to 6.661338205653582e-19.
Epoch 531/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 532/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 533/2000
108/108 [==============================] - 0s 204us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 534/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 535/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 536/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 537/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 538/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 539/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 540/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00540: ReduceLROnPlateau reducing learning rate to 3.330669102826791e-19.
Epoch 541/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 542/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 543/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 544/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 545/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 546/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 547/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 548/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 549/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 550/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00550: ReduceLROnPlateau reducing learning rate to 1.6653345514133955e-19.
Epoch 551/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 552/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 553/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 554/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 555/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 556/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 557/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 558/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 559/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 560/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00560: ReduceLROnPlateau reducing learning rate to 8.326672757066978e-20.
Epoch 561/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 562/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 563/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 564/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 565/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 566/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 567/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 568/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 569/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 570/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00570: ReduceLROnPlateau reducing learning rate to 4.163336378533489e-20.
Epoch 571/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 572/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 573/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 574/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 575/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 576/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 577/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 578/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 579/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 580/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00580: ReduceLROnPlateau reducing learning rate to 2.0816681892667444e-20.
Epoch 581/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 582/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 583/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 584/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 585/2000
108/108 [==============================] - ETA: 0s - loss: 0.5617 - accuracy: 0.75 - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 586/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 587/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 588/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 589/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 590/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00590: ReduceLROnPlateau reducing learning rate to 1.0408340946333722e-20.
Epoch 591/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 592/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 593/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 594/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 595/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 596/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 597/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 598/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 599/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 600/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00600: ReduceLROnPlateau reducing learning rate to 5.204170473166861e-21.
Epoch 601/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 602/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 603/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 604/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 605/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 606/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 607/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 608/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 609/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 610/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00610: ReduceLROnPlateau reducing learning rate to 2.6020852365834305e-21.
Epoch 611/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 612/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 613/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 614/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 615/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 616/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 617/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 618/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 619/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 620/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00620: ReduceLROnPlateau reducing learning rate to 1.3010426182917153e-21.
Epoch 621/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 622/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 623/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 624/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 625/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 626/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 627/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 628/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 629/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 630/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00630: ReduceLROnPlateau reducing learning rate to 6.505213091458576e-22.
Epoch 631/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 632/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 633/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 634/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 635/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 636/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 637/2000
108/108 [==============================] - ETA: 0s - loss: 0.6880 - accuracy: 0.56 - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 638/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 639/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 640/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00640: ReduceLROnPlateau reducing learning rate to 3.252606545729288e-22.
Epoch 641/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 642/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 643/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 644/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 645/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 646/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 647/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 648/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 649/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 650/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00650: ReduceLROnPlateau reducing learning rate to 1.626303272864644e-22.
Epoch 651/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 652/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 653/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 654/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 655/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 656/2000
108/108 [==============================] - 0s 194us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 657/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 658/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 659/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 660/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00660: ReduceLROnPlateau reducing learning rate to 8.13151636432322e-23.
Epoch 661/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 662/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 663/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 664/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 665/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 666/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 667/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 668/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 669/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 670/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00670: ReduceLROnPlateau reducing learning rate to 4.06575818216161e-23.
Epoch 671/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 672/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 673/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 674/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 675/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 676/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 677/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 678/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 679/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 680/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00680: ReduceLROnPlateau reducing learning rate to 2.032879091080805e-23.
Epoch 681/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 682/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 683/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 684/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 685/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 686/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 687/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 688/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 689/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 690/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00690: ReduceLROnPlateau reducing learning rate to 1.0164395455404025e-23.
Epoch 691/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 692/2000
108/108 [==============================] - 0s 185us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 693/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 694/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 695/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 696/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 697/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 698/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 699/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 700/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00700: ReduceLROnPlateau reducing learning rate to 5.082197727702013e-24.
Epoch 701/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 702/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 703/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 704/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 705/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 706/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 707/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 708/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 709/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 710/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00710: ReduceLROnPlateau reducing learning rate to 2.5410988638510064e-24.
Epoch 711/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 712/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 713/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 714/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 715/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 716/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 717/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 718/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 719/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 720/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00720: ReduceLROnPlateau reducing learning rate to 1.2705494319255032e-24.
Epoch 721/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 722/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 723/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 724/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 725/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 726/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 727/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 728/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 729/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 730/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00730: ReduceLROnPlateau reducing learning rate to 6.352747159627516e-25.
Epoch 731/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 732/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 733/2000
108/108 [==============================] - 0s 213us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 734/2000
108/108 [==============================] - 0s 176us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 735/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 736/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 737/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 738/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 739/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 740/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00740: ReduceLROnPlateau reducing learning rate to 3.176373579813758e-25.
Epoch 741/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 742/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 743/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 744/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 745/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 746/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 747/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 748/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 749/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 750/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00750: ReduceLROnPlateau reducing learning rate to 1.588186789906879e-25.
Epoch 751/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 752/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 753/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 754/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 755/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 756/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 757/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 758/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 759/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 760/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00760: ReduceLROnPlateau reducing learning rate to 7.940933949534395e-26.
Epoch 761/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 762/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 763/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 764/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 765/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 766/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 767/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 768/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 769/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 770/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00770: ReduceLROnPlateau reducing learning rate to 3.9704669747671974e-26.
Epoch 771/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 772/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 773/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 774/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 775/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 776/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 777/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 778/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 779/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 780/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00780: ReduceLROnPlateau reducing learning rate to 1.9852334873835987e-26.
Epoch 781/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 782/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 783/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 784/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 785/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 786/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 787/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 788/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 789/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 790/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00790: ReduceLROnPlateau reducing learning rate to 9.926167436917994e-27.
Epoch 791/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 792/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 793/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 794/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 795/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 796/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 797/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 798/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 799/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 800/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00800: ReduceLROnPlateau reducing learning rate to 4.963083718458997e-27.
Epoch 801/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 802/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 803/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 804/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 805/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 806/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 807/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 808/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 809/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 810/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00810: ReduceLROnPlateau reducing learning rate to 2.4815418592294984e-27.
Epoch 811/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 812/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 813/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 814/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 815/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 816/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 817/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 818/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 819/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 820/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00820: ReduceLROnPlateau reducing learning rate to 1.2407709296147492e-27.
Epoch 821/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 822/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 823/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 824/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 825/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 826/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 827/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 828/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 829/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 830/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00830: ReduceLROnPlateau reducing learning rate to 6.203854648073746e-28.
Epoch 831/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 832/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 833/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 834/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 835/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 836/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 837/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 838/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 839/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 840/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00840: ReduceLROnPlateau reducing learning rate to 3.101927324036873e-28.
Epoch 841/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 842/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 843/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 844/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 845/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 846/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 847/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 848/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 849/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 850/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00850: ReduceLROnPlateau reducing learning rate to 1.5509636620184365e-28.
Epoch 851/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 852/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 853/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 854/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 855/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 856/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 857/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 858/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 859/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 860/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00860: ReduceLROnPlateau reducing learning rate to 7.754818310092183e-29.
Epoch 861/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 862/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 863/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 864/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 865/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 866/2000
108/108 [==============================] - 0s 176us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 867/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 868/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 869/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 870/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00870: ReduceLROnPlateau reducing learning rate to 3.877409155046091e-29.
Epoch 871/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 872/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 873/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 874/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 875/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 876/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 877/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 878/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 879/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 880/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00880: ReduceLROnPlateau reducing learning rate to 1.9387045775230456e-29.
Epoch 881/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 882/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 883/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 884/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 885/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 886/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 887/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 888/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 889/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 890/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00890: ReduceLROnPlateau reducing learning rate to 9.693522887615228e-30.
Epoch 891/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 892/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 893/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 894/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 895/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 896/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 897/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 898/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 899/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 900/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00900: ReduceLROnPlateau reducing learning rate to 4.846761443807614e-30.
Epoch 901/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 902/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 903/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 904/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 905/2000
108/108 [==============================] - 0s 176us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 906/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 907/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 908/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 909/2000
108/108 [==============================] - 0s 185us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 910/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00910: ReduceLROnPlateau reducing learning rate to 2.423380721903807e-30.
Epoch 911/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 912/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 913/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 914/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 915/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 916/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 917/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 918/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 919/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 920/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00920: ReduceLROnPlateau reducing learning rate to 1.2116903609519035e-30.
Epoch 921/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 922/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 923/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 924/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 925/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 926/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 927/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 928/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 929/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 930/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00930: ReduceLROnPlateau reducing learning rate to 6.058451804759518e-31.
Epoch 931/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 932/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 933/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 934/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 935/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 936/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 937/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 938/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 939/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 940/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00940: ReduceLROnPlateau reducing learning rate to 3.029225902379759e-31.
Epoch 941/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 942/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 943/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 944/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 945/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 946/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 947/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 948/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 949/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 950/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00950: ReduceLROnPlateau reducing learning rate to 1.5146129511898794e-31.
Epoch 951/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 952/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 953/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 954/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 955/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 956/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 957/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 958/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 959/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 960/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00960: ReduceLROnPlateau reducing learning rate to 7.573064755949397e-32.
Epoch 961/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 962/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 963/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 964/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 965/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 966/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 967/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 968/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 969/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 970/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00970: ReduceLROnPlateau reducing learning rate to 3.7865323779746985e-32.
Epoch 971/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 972/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 973/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 974/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 975/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 976/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 977/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 978/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 979/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 980/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00980: ReduceLROnPlateau reducing learning rate to 1.8932661889873492e-32.
Epoch 981/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 982/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 983/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 984/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 985/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 986/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 987/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 988/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 989/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 990/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 00990: ReduceLROnPlateau reducing learning rate to 9.466330944936746e-33.
Epoch 991/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 992/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 993/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 994/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 995/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 996/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 997/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 998/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 999/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1000/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01000: ReduceLROnPlateau reducing learning rate to 4.733165472468373e-33.
Epoch 1001/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1002/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1003/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1004/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1005/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1006/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1007/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1008/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1009/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1010/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01010: ReduceLROnPlateau reducing learning rate to 2.3665827362341866e-33.
Epoch 1011/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1012/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1013/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1014/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1015/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1016/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1017/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1018/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1019/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1020/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01020: ReduceLROnPlateau reducing learning rate to 1.1832913681170933e-33.
Epoch 1021/2000
108/108 [==============================] - ETA: 0s - loss: 0.5831 - accuracy: 0.68 - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1022/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1023/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1024/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1025/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1026/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1027/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1028/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1029/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1030/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01030: ReduceLROnPlateau reducing learning rate to 5.916456840585466e-34.
Epoch 1031/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1032/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1033/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1034/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1035/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1036/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1037/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1038/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1039/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1040/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01040: ReduceLROnPlateau reducing learning rate to 2.958228420292733e-34.
Epoch 1041/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1042/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1043/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1044/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1045/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1046/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1047/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1048/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1049/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1050/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01050: ReduceLROnPlateau reducing learning rate to 1.4791142101463666e-34.
Epoch 1051/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1052/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1053/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1054/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1055/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1056/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1057/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1058/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1059/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1060/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01060: ReduceLROnPlateau reducing learning rate to 7.395571050731833e-35.
Epoch 1061/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1062/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1063/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1064/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1065/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1066/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1067/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1068/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1069/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1070/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01070: ReduceLROnPlateau reducing learning rate to 3.6977855253659165e-35.
Epoch 1071/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1072/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1073/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1074/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1075/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1076/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1077/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1078/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1079/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1080/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01080: ReduceLROnPlateau reducing learning rate to 1.8488927626829582e-35.
Epoch 1081/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1082/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1083/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1084/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1085/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1086/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1087/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1088/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1089/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1090/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01090: ReduceLROnPlateau reducing learning rate to 9.244463813414791e-36.
Epoch 1091/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1092/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1093/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1094/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1095/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1096/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1097/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1098/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1099/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1100/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01100: ReduceLROnPlateau reducing learning rate to 4.6222319067073956e-36.
Epoch 1101/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1102/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1103/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1104/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1105/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1106/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1107/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1108/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1109/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1110/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01110: ReduceLROnPlateau reducing learning rate to 2.3111159533536978e-36.
Epoch 1111/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1112/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1113/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1114/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1115/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1116/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1117/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1118/2000
108/108 [==============================] - ETA: 0s - loss: 0.5864 - accuracy: 0.68 - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1119/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1120/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01120: ReduceLROnPlateau reducing learning rate to 1.1555579766768489e-36.
Epoch 1121/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1122/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1123/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1124/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1125/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1126/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1127/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1128/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1129/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1130/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01130: ReduceLROnPlateau reducing learning rate to 5.7777898833842445e-37.
Epoch 1131/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1132/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1133/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1134/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1135/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1136/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1137/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1138/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1139/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1140/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01140: ReduceLROnPlateau reducing learning rate to 2.8888949416921223e-37.
Epoch 1141/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1142/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1143/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1144/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1145/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1146/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1147/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1148/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1149/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1150/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01150: ReduceLROnPlateau reducing learning rate to 1.4444474708460611e-37.
Epoch 1151/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1152/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1153/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1154/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1155/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1156/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1157/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1158/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1159/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1160/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01160: ReduceLROnPlateau reducing learning rate to 7.222237354230306e-38.
Epoch 1161/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1162/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1163/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1164/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1165/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1166/2000
108/108 [==============================] - 0s 250us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1167/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1168/2000
108/108 [==============================] - 0s 176us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1169/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1170/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01170: ReduceLROnPlateau reducing learning rate to 3.611118677115153e-38.
Epoch 1171/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1172/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1173/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1174/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1175/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1176/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1177/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1178/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1179/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1180/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01180: ReduceLROnPlateau reducing learning rate to 1.8055593385575764e-38.
Epoch 1181/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1182/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1183/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1184/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1185/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1186/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1187/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1188/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1189/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1190/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01190: ReduceLROnPlateau reducing learning rate to 9.027796692787882e-39.
Epoch 1191/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1192/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1193/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1194/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1195/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1196/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1197/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1198/2000
108/108 [==============================] - 0s 176us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1199/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1200/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01200: ReduceLROnPlateau reducing learning rate to 4.513898346393941e-39.
Epoch 1201/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1202/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1203/2000
108/108 [==============================] - 0s 176us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1204/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1205/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1206/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1207/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1208/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1209/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1210/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01210: ReduceLROnPlateau reducing learning rate to 2.2569495235215866e-39.
Epoch 1211/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1212/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1213/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1214/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1215/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1216/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1217/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1218/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1219/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1220/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01220: ReduceLROnPlateau reducing learning rate to 1.1284747617607933e-39.
Epoch 1221/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1222/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1223/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1224/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1225/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1226/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1227/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1228/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1229/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1230/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01230: ReduceLROnPlateau reducing learning rate to 5.642370305557806e-40.
Epoch 1231/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1232/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1233/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1234/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1235/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1236/2000
108/108 [==============================] - 0s 176us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1237/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1238/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1239/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1240/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01240: ReduceLROnPlateau reducing learning rate to 2.821185152778903e-40.
Epoch 1241/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1242/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1243/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1244/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1245/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1246/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1247/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1248/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1249/2000
108/108 [==============================] - 0s 185us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1250/2000
108/108 [==============================] - 0s 185us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01250: ReduceLROnPlateau reducing learning rate to 1.4105890731432906e-40.
Epoch 1251/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1252/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1253/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1254/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1255/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1256/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1257/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1258/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1259/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1260/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01260: ReduceLROnPlateau reducing learning rate to 7.052945365716453e-41.
Epoch 1261/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1262/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1263/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1264/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1265/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1266/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1267/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1268/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1269/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1270/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01270: ReduceLROnPlateau reducing learning rate to 3.5265077153198346e-41.
Epoch 1271/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1272/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1273/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1274/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1275/2000
108/108 [==============================] - 0s 129us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1276/2000
108/108 [==============================] - 0s 176us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1277/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1278/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1279/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1280/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01280: ReduceLROnPlateau reducing learning rate to 1.7632538576599173e-41.
Epoch 1281/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1282/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1283/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1284/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1285/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1286/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1287/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1288/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1289/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1290/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01290: ReduceLROnPlateau reducing learning rate to 8.816269288299587e-42.
Epoch 1291/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1292/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1293/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1294/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1295/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1296/2000
108/108 [==============================] - 0s 176us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1297/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1298/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1299/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1300/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01300: ReduceLROnPlateau reducing learning rate to 4.4084849687658745e-42.
Epoch 1301/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1302/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1303/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1304/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1305/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1306/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1307/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1308/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1309/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1310/2000
108/108 [==============================] - ETA: 0s - loss: 0.6582 - accuracy: 0.56 - 0s 194us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01310: ReduceLROnPlateau reducing learning rate to 2.2042424843829373e-42.
Epoch 1311/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1312/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1313/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1314/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1315/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1316/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1317/2000
108/108 [==============================] - ETA: 0s - loss: 0.6126 - accuracy: 0.68 - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1318/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1319/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1320/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01320: ReduceLROnPlateau reducing learning rate to 1.1021212421914686e-42.
Epoch 1321/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1322/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1323/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1324/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1325/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1326/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1327/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1328/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1329/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1330/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01330: ReduceLROnPlateau reducing learning rate to 5.507102964796531e-43.
Epoch 1331/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1332/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1333/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1334/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1335/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1336/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1337/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1338/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1339/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1340/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01340: ReduceLROnPlateau reducing learning rate to 2.7535514823982655e-43.
Epoch 1341/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1342/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1343/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1344/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1345/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1346/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1347/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1348/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1349/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1350/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01350: ReduceLROnPlateau reducing learning rate to 1.3732724950383207e-43.
Epoch 1351/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1352/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1353/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1354/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1355/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1356/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1357/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1358/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1359/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1360/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01360: ReduceLROnPlateau reducing learning rate to 6.866362475191604e-44.
Epoch 1361/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1362/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1363/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1364/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1365/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1366/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1367/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1368/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1369/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1370/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01370: ReduceLROnPlateau reducing learning rate to 3.433181237595802e-44.
Epoch 1371/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1372/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1373/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1374/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1375/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1376/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1377/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1378/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1379/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1380/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01380: ReduceLROnPlateau reducing learning rate to 1.6815581571897805e-44.
Epoch 1381/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1382/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1383/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1384/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1385/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1386/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1387/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1388/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1389/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1390/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01390: ReduceLROnPlateau reducing learning rate to 8.407790785948902e-45.
Epoch 1391/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1392/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1393/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1394/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1395/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1396/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1397/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1398/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1399/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1400/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01400: ReduceLROnPlateau reducing learning rate to 4.203895392974451e-45.
Epoch 1401/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1402/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1403/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1404/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1405/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1406/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1407/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1408/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1409/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1410/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01410: ReduceLROnPlateau reducing learning rate to 2.1019476964872256e-45.
Epoch 1411/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1412/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1413/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1414/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1415/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1416/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1417/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1418/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1419/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1420/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01420: ReduceLROnPlateau reducing learning rate to 1.401298464324817e-45.
Epoch 1421/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1422/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1423/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1424/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1425/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1426/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1427/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1428/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1429/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1430/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000

Epoch 01430: ReduceLROnPlateau reducing learning rate to 7.006492321624085e-46.
Epoch 1431/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1432/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1433/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1434/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1435/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1436/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1437/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1438/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1439/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1440/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1441/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1442/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1443/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1444/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1445/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1446/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1447/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1448/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1449/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1450/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1451/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1452/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1453/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1454/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1455/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1456/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1457/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1458/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1459/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1460/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1461/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1462/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1463/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1464/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1465/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1466/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1467/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1468/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1469/2000
108/108 [==============================] - 0s 176us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1470/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1471/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1472/2000
108/108 [==============================] - 0s 176us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1473/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1474/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1475/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1476/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1477/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1478/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1479/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1480/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1481/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1482/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1483/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1484/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1485/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1486/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1487/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1488/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1489/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1490/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1491/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1492/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1493/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1494/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1495/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1496/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1497/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1498/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1499/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1500/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1501/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1502/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1503/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1504/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1505/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1506/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1507/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1508/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1509/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1510/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1511/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1512/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1513/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1514/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1515/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1516/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1517/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1518/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1519/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1520/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1521/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1522/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1523/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1524/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1525/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1526/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1527/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1528/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1529/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1530/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1531/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1532/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1533/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1534/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1535/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1536/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1537/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1538/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1539/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1540/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1541/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1542/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1543/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1544/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1545/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1546/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1547/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1548/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1549/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1550/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1551/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1552/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1553/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1554/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1555/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1556/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1557/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1558/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1559/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1560/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1561/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1562/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1563/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1564/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1565/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1566/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1567/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1568/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1569/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1570/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1571/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1572/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1573/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1574/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1575/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1576/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1577/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1578/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1579/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1580/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1581/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1582/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1583/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1584/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1585/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1586/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1587/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1588/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1589/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1590/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1591/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1592/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1593/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1594/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1595/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1596/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1597/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1598/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1599/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1600/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1601/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1602/2000
108/108 [==============================] - ETA: 0s - loss: 0.5878 - accuracy: 0.71 - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1603/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1604/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1605/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1606/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1607/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1608/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1609/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1610/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1611/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1612/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1613/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1614/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1615/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1616/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1617/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1618/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1619/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1620/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1621/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1622/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1623/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1624/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1625/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1626/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1627/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1628/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1629/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1630/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1631/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1632/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1633/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1634/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1635/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1636/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1637/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1638/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1639/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1640/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1641/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1642/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1643/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1644/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1645/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1646/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1647/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1648/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1649/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1650/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1651/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1652/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1653/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1654/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1655/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1656/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1657/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1658/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1659/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1660/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1661/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1662/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1663/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1664/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1665/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1666/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1667/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1668/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1669/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1670/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1671/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1672/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1673/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1674/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1675/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1676/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1677/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1678/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1679/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1680/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1681/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1682/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1683/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1684/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1685/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1686/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1687/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1688/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1689/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1690/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1691/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1692/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1693/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1694/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1695/2000
108/108 [==============================] - ETA: 0s - loss: 0.5554 - accuracy: 0.75 - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1696/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1697/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1698/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1699/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1700/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1701/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1702/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1703/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1704/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1705/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1706/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1707/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1708/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1709/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1710/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1711/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1712/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1713/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1714/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1715/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1716/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1717/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1718/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1719/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1720/2000
108/108 [==============================] - ETA: 0s - loss: 0.6044 - accuracy: 0.71 - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1721/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1722/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1723/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1724/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1725/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1726/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1727/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1728/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1729/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1730/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1731/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1732/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1733/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1734/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1735/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1736/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1737/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1738/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1739/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1740/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1741/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1742/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1743/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1744/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1745/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1746/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1747/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1748/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1749/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1750/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1751/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1752/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1753/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1754/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1755/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1756/2000
108/108 [==============================] - 0s 185us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1757/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1758/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1759/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1760/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1761/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1762/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1763/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1764/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1765/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1766/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1767/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1768/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1769/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1770/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1771/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1772/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1773/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1774/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1775/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1776/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1777/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1778/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1779/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1780/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1781/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1782/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1783/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1784/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1785/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1786/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1787/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1788/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1789/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1790/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1791/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1792/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1793/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1794/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1795/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1796/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1797/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1798/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1799/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1800/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1801/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1802/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1803/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1804/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1805/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1806/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1807/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1808/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1809/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1810/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1811/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1812/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1813/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1814/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1815/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1816/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1817/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1818/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1819/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1820/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1821/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1822/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1823/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1824/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1825/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1826/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1827/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1828/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1829/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1830/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1831/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1832/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1833/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1834/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1835/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1836/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1837/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1838/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1839/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1840/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1841/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1842/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1843/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1844/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1845/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1846/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1847/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1848/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1849/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1850/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1851/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1852/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1853/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1854/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1855/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1856/2000
108/108 [==============================] - ETA: 0s - loss: 0.5179 - accuracy: 0.84 - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1857/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1858/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1859/2000
108/108 [==============================] - 0s 185us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1860/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1861/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1862/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1863/2000
108/108 [==============================] - 0s 176us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1864/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1865/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1866/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1867/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1868/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1869/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1870/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1871/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1872/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1873/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1874/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1875/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1876/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1877/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1878/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1879/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1880/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1881/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1882/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1883/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1884/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1885/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1886/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1887/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1888/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1889/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1890/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1891/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1892/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1893/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1894/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1895/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1896/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1897/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1898/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1899/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1900/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1901/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1902/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1903/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1904/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1905/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1906/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1907/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1908/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1909/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1910/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1911/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1912/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1913/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1914/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1915/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1916/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1917/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1918/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1919/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1920/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1921/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1922/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1923/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1924/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1925/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1926/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1927/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1928/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1929/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1930/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1931/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1932/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1933/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1934/2000
108/108 [==============================] - 0s 167us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1935/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1936/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1937/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1938/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1939/2000
108/108 [==============================] - 0s 93us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1940/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1941/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1942/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1943/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1944/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1945/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1946/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1947/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1948/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1949/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1950/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1951/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1952/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1953/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1954/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1955/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1956/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1957/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1958/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1959/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1960/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1961/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1962/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1963/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1964/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1965/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1966/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1967/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1968/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1969/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1970/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1971/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1972/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1973/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1974/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1975/2000
108/108 [==============================] - ETA: 0s - loss: 0.6422 - accuracy: 0.68 - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1976/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1977/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1978/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1979/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1980/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1981/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1982/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1983/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1984/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1985/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1986/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1987/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1988/2000
108/108 [==============================] - 0s 102us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1989/2000
108/108 [==============================] - 0s 111us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1990/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1991/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1992/2000
108/108 [==============================] - 0s 120us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1993/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1994/2000
108/108 [==============================] - 0s 157us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1995/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1996/2000
108/108 [==============================] - 0s 148us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1997/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1998/2000
108/108 [==============================] - 0s 139us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 1999/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
Epoch 2000/2000
108/108 [==============================] - 0s 130us/step - loss: 0.6045 - accuracy: 0.6852 - val_loss: 0.6920 - val_accuracy: 0.5000
In [213]:
# Plot the training history: accuracy and loss curves, training vs. validation.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
range(0, 2000)
In [214]:
# Evaluate the trained network on the held-out test partition.
test_loss, test_acc = model.evaluate(X_test, y_test)
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
36/36 [==============================] - 0s 111us/step
test loss: 0.6919848455323113, test accuracy: 0.5
In [215]:
# Threshold-free ranking quality: AUC-ROC computed on the raw predicted probabilities.
y_pred = model.predict(X_test)
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
AUC ROC:  0.528428093645485
In [216]:
# Binarize the sigmoid outputs at the 0.5 threshold, then report chance-corrected
# agreement (Cohen's kappa) and the confusion matrix against the true labels.
y_pred = [int(p >= 0.5) for p in y_pred]
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
Kappa:  -0.04854368932038833
[[13 10]
 [ 8  5]]

KMeans

In [320]:
X
Out[320]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6
0 1.297447 0.231356 0.546295 1.007345 0.269632 0.236051
1 0.766614 -1.227377 -0.605576 -1.216250 0.473081 -0.446139
2 -0.431734 -1.183099 1.287522 0.927452 -0.309857 -1.666006
3 0.078607 -1.572049 1.188424 0.092330 -0.244414 -0.490911
4 0.007655 -1.466562 1.294323 -0.410616 -0.763440 -1.007284
5 -0.370498 1.671450 0.153543 -0.812481 0.183921 -0.155363
6 -0.291598 1.214994 -0.454284 -0.064902 -0.043613 0.345369
7 0.069950 -2.315347 -1.489877 -0.732147 0.555418 -1.397283
8 -0.492019 1.159708 0.098478 0.022986 1.057930 0.455568
9 0.452658 -1.370382 0.122635 0.031601 0.705042 -0.811405
10 0.178305 -1.404896 -0.110526 -1.915899 -0.893670 -0.103585
11 -0.394571 -2.295623 -0.401638 0.054717 0.112940 -0.501624
12 0.825509 -0.879700 -0.340624 -0.248308 -0.147973 -1.486938
13 -0.567314 -1.474896 -0.684124 -0.105587 0.149929 0.556946
14 -0.287062 -2.507554 -0.996441 -0.535391 0.704154 -0.243508
15 1.589945 0.608356 1.711054 -0.876066 0.069532 -1.756202
16 1.546148 -0.417533 -1.674934 0.046415 0.405062 1.968764
17 1.404228 -0.047254 0.691725 0.713455 -0.813024 -0.471725
18 0.518614 -0.334892 -1.333394 0.534502 -0.127375 1.289634
19 1.235351 0.809799 -0.698921 1.861197 0.360070 1.788748
20 -0.483594 -0.266831 0.102686 -0.442555 0.306825 0.803359
21 0.728509 -0.464576 -1.661459 -0.142280 -0.515254 0.741121
22 1.170053 0.597229 1.612975 0.936085 0.020237 -0.219170
23 1.225988 -1.042734 -2.231606 -1.970862 0.162396 0.833329
24 0.272811 0.538009 0.918204 0.116495 -1.927558 0.690336
25 -0.521172 1.340198 1.481586 -0.367734 -0.838038 0.649490
26 -0.188935 0.665974 1.392134 -0.321225 -2.463101 0.567261
27 1.519541 -0.246954 1.208791 0.222868 0.071996 -0.382862
28 -0.405847 -1.303832 1.440527 -0.539141 -1.285875 -0.330782
29 -1.850113 -1.286361 0.526982 -0.426757 -0.821369 -0.659039
... ... ... ... ... ... ...
114 1.085296 1.141590 -1.386728 0.289820 -0.315495 1.792574
115 -1.024369 0.443668 -0.122627 0.442887 0.357498 -1.433931
116 0.572112 -1.055656 0.222883 -0.373323 -0.878489 1.874500
117 0.550237 -0.766605 -0.056746 0.016528 -0.339377 0.722854
118 0.378441 -1.897452 -0.229383 -2.155450 -1.099087 1.240515
119 -0.936242 -0.306037 -0.207792 1.249626 -1.690910 -1.306194
120 1.647950 0.514377 -2.369044 0.073856 -0.669624 -0.740015
121 1.150939 -0.633833 -0.604698 1.176872 2.035262 0.080449
122 0.546853 1.514343 0.069751 -1.901174 0.644701 -1.269584
123 1.223435 0.897899 0.213625 -0.937897 0.918787 0.720059
124 0.857062 0.842244 -0.304539 -0.782109 -0.165739 0.507814
125 0.730334 -0.675216 0.505721 -0.782744 -0.443136 0.211297
126 0.541766 -0.279794 0.749254 0.499473 -0.700209 0.809215
127 0.456191 -0.974577 0.840615 0.567252 -0.544141 0.020146
128 0.712574 0.049329 1.621927 -0.707602 -1.976844 -0.222721
129 0.675521 -0.012987 1.395059 -1.145915 -1.822364 -0.863284
130 -0.312450 1.433627 0.708713 -2.164346 -0.919893 -0.306371
131 0.181615 1.164977 -0.985505 -0.574818 -0.030196 0.270671
132 0.260510 0.988101 -0.315543 -1.769090 -0.459089 0.211096
133 -0.311371 -1.408386 -1.645724 0.532301 1.345514 0.165424
134 -0.301269 -0.950573 -1.290638 0.868461 0.572738 -0.467161
135 -0.309830 -0.407406 0.014910 -0.223798 0.528946 -0.355130
136 -1.610060 0.941392 -1.405020 -0.222889 -0.007353 -0.446852
137 -0.925268 -0.067389 -0.201548 -0.622410 1.792638 -1.539592
138 -0.015264 0.452609 -0.699062 1.150528 1.287409 -0.173602
139 1.326980 -1.383698 0.746727 1.072280 -0.295723 -1.047791
140 0.896216 -1.267594 1.427336 0.997423 1.895405 0.117410
141 1.185098 -0.497068 0.605539 -1.180788 -0.694937 0.254679
142 0.613731 1.209050 -1.541221 0.828782 -1.372158 0.505984
143 -1.033863 0.780408 0.144446 -0.151813 -0.085778 -0.271083

144 rows × 6 columns

In [321]:
# Elbow method: within-cluster sum of squares (inertia) for k = 1..14.
# KMeans.fit returns the fitted estimator, so the whole sweep is one comprehension.
WSSs = [KMeans(n_clusters=k, random_state=0).fit(X).inertia_ for k in range(1, 15)]
WSSs
Out[321]:
[864.0000000000001,
 718.4272269949738,
 620.3882940000645,
 547.0243831595473,
 498.4088405289721,
 452.2938075669829,
 419.0812017663501,
 394.96856155909757,
 378.5884628409906,
 357.339318590697,
 344.59994179556793,
 327.22045118404804,
 316.0718756196303,
 310.81264751973913]
In [322]:
# Elbow plot: look for the "knee" in the inertia curve to pick k.
fig, ax = plt.subplots(figsize=(12, 12))
ax.plot(range(1, 15), WSSs)
Out[322]:
[<matplotlib.lines.Line2D at 0x1b82c08cc18>]

K=4

In [323]:
# Fit the final k-means model (k chosen as 4 from the elbow plot) on the
# tonal-centroid features; fit() returns the estimator, so construct-and-fit in one step.
kmeans_tc = KMeans(n_clusters=4, random_state=0, n_init=10).fit(X)
Out[323]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=4, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [324]:
# Cluster label (0-3) assigned to each row of X during fitting.
kmeans_tc.labels_
Out[324]:
array([3, 3, 3, 3, 3, 1, 1, 3, 1, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 0, 0, 0,
       3, 0, 3, 2, 3, 3, 3, 2, 3, 3, 0, 0, 2, 2, 0, 1, 0, 2, 1, 1, 1, 2,
       2, 2, 0, 0, 0, 2, 2, 0, 1, 1, 1, 0, 3, 3, 0, 3, 0, 3, 2, 2, 2, 1,
       1, 1, 3, 3, 1, 2, 0, 3, 2, 2, 2, 2, 2, 2, 3, 2, 3, 3, 3, 2, 3, 0,
       3, 1, 1, 1, 1, 2, 0, 1, 0, 2, 1, 0, 2, 3, 1, 2, 2, 1, 2, 2, 2, 1,
       2, 1, 0, 3, 0, 2, 0, 0, 3, 2, 0, 0, 1, 1, 1, 3, 3, 3, 3, 3, 1, 1,
       1, 0, 0, 1, 2, 1, 2, 3, 3, 3, 0, 2])
In [325]:
# predict() on the same data the model was fitted on reproduces labels_
# (the two outputs above/below are identical) — kept as a named variable for the
# column assignment in the next cell.
clusters_tc = kmeans_tc.predict(X)
clusters_tc
Out[325]:
array([3, 3, 3, 3, 3, 1, 1, 3, 1, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 0, 0, 0,
       3, 0, 3, 2, 3, 3, 3, 2, 3, 3, 0, 0, 2, 2, 0, 1, 0, 2, 1, 1, 1, 2,
       2, 2, 0, 0, 0, 2, 2, 0, 1, 1, 1, 0, 3, 3, 0, 3, 0, 3, 2, 2, 2, 1,
       1, 1, 3, 3, 1, 2, 0, 3, 2, 2, 2, 2, 2, 2, 3, 2, 3, 3, 3, 2, 3, 0,
       3, 1, 1, 1, 1, 2, 0, 1, 0, 2, 1, 0, 2, 3, 1, 2, 2, 1, 2, 2, 2, 1,
       2, 1, 0, 3, 0, 2, 0, 0, 3, 2, 0, 0, 1, 1, 1, 3, 3, 3, 3, 3, 1, 1,
       1, 0, 0, 1, 2, 1, 2, 3, 3, 3, 0, 2])
In [326]:
# NOTE(review): mutates X in place — every downstream cell now sees the augmented
# frame (8 columns instead of 6). Consider assigning to a new name instead.
X.loc[:,'Cluster'] = clusters_tc
# list(y) forces positional assignment, sidestepping index alignment between X and y.
X.loc[:,'chosen'] = list(y)
In [327]:
X
Out[327]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6 Cluster chosen
0 1.297447 0.231356 0.546295 1.007345 0.269632 0.236051 3 0
1 0.766614 -1.227377 -0.605576 -1.216250 0.473081 -0.446139 3 0
2 -0.431734 -1.183099 1.287522 0.927452 -0.309857 -1.666006 3 0
3 0.078607 -1.572049 1.188424 0.092330 -0.244414 -0.490911 3 0
4 0.007655 -1.466562 1.294323 -0.410616 -0.763440 -1.007284 3 0
5 -0.370498 1.671450 0.153543 -0.812481 0.183921 -0.155363 1 0
6 -0.291598 1.214994 -0.454284 -0.064902 -0.043613 0.345369 1 0
7 0.069950 -2.315347 -1.489877 -0.732147 0.555418 -1.397283 3 0
8 -0.492019 1.159708 0.098478 0.022986 1.057930 0.455568 1 0
9 0.452658 -1.370382 0.122635 0.031601 0.705042 -0.811405 3 0
10 0.178305 -1.404896 -0.110526 -1.915899 -0.893670 -0.103585 3 0
11 -0.394571 -2.295623 -0.401638 0.054717 0.112940 -0.501624 3 0
12 0.825509 -0.879700 -0.340624 -0.248308 -0.147973 -1.486938 3 0
13 -0.567314 -1.474896 -0.684124 -0.105587 0.149929 0.556946 0 0
14 -0.287062 -2.507554 -0.996441 -0.535391 0.704154 -0.243508 0 0
15 1.589945 0.608356 1.711054 -0.876066 0.069532 -1.756202 3 0
16 1.546148 -0.417533 -1.674934 0.046415 0.405062 1.968764 0 0
17 1.404228 -0.047254 0.691725 0.713455 -0.813024 -0.471725 3 0
18 0.518614 -0.334892 -1.333394 0.534502 -0.127375 1.289634 0 0
19 1.235351 0.809799 -0.698921 1.861197 0.360070 1.788748 0 0
20 -0.483594 -0.266831 0.102686 -0.442555 0.306825 0.803359 0 0
21 0.728509 -0.464576 -1.661459 -0.142280 -0.515254 0.741121 0 0
22 1.170053 0.597229 1.612975 0.936085 0.020237 -0.219170 3 0
23 1.225988 -1.042734 -2.231606 -1.970862 0.162396 0.833329 0 0
24 0.272811 0.538009 0.918204 0.116495 -1.927558 0.690336 3 0
25 -0.521172 1.340198 1.481586 -0.367734 -0.838038 0.649490 2 0
26 -0.188935 0.665974 1.392134 -0.321225 -2.463101 0.567261 3 0
27 1.519541 -0.246954 1.208791 0.222868 0.071996 -0.382862 3 0
28 -0.405847 -1.303832 1.440527 -0.539141 -1.285875 -0.330782 3 0
29 -1.850113 -1.286361 0.526982 -0.426757 -0.821369 -0.659039 2 0
... ... ... ... ... ... ... ... ...
114 1.085296 1.141590 -1.386728 0.289820 -0.315495 1.792574 0 1
115 -1.024369 0.443668 -0.122627 0.442887 0.357498 -1.433931 2 1
116 0.572112 -1.055656 0.222883 -0.373323 -0.878489 1.874500 0 1
117 0.550237 -0.766605 -0.056746 0.016528 -0.339377 0.722854 0 1
118 0.378441 -1.897452 -0.229383 -2.155450 -1.099087 1.240515 3 1
119 -0.936242 -0.306037 -0.207792 1.249626 -1.690910 -1.306194 2 1
120 1.647950 0.514377 -2.369044 0.073856 -0.669624 -0.740015 0 1
121 1.150939 -0.633833 -0.604698 1.176872 2.035262 0.080449 0 1
122 0.546853 1.514343 0.069751 -1.901174 0.644701 -1.269584 1 1
123 1.223435 0.897899 0.213625 -0.937897 0.918787 0.720059 1 1
124 0.857062 0.842244 -0.304539 -0.782109 -0.165739 0.507814 1 1
125 0.730334 -0.675216 0.505721 -0.782744 -0.443136 0.211297 3 1
126 0.541766 -0.279794 0.749254 0.499473 -0.700209 0.809215 3 1
127 0.456191 -0.974577 0.840615 0.567252 -0.544141 0.020146 3 1
128 0.712574 0.049329 1.621927 -0.707602 -1.976844 -0.222721 3 1
129 0.675521 -0.012987 1.395059 -1.145915 -1.822364 -0.863284 3 1
130 -0.312450 1.433627 0.708713 -2.164346 -0.919893 -0.306371 1 1
131 0.181615 1.164977 -0.985505 -0.574818 -0.030196 0.270671 1 1
132 0.260510 0.988101 -0.315543 -1.769090 -0.459089 0.211096 1 1
133 -0.311371 -1.408386 -1.645724 0.532301 1.345514 0.165424 0 1
134 -0.301269 -0.950573 -1.290638 0.868461 0.572738 -0.467161 0 1
135 -0.309830 -0.407406 0.014910 -0.223798 0.528946 -0.355130 1 1
136 -1.610060 0.941392 -1.405020 -0.222889 -0.007353 -0.446852 2 1
137 -0.925268 -0.067389 -0.201548 -0.622410 1.792638 -1.539592 1 1
138 -0.015264 0.452609 -0.699062 1.150528 1.287409 -0.173602 2 1
139 1.326980 -1.383698 0.746727 1.072280 -0.295723 -1.047791 3 1
140 0.896216 -1.267594 1.427336 0.997423 1.895405 0.117410 3 1
141 1.185098 -0.497068 0.605539 -1.180788 -0.694937 0.254679 3 1
142 0.613731 1.209050 -1.541221 0.828782 -1.372158 0.505984 0 1
143 -1.033863 0.780408 0.144446 -0.151813 -0.085778 -0.271083 2 1

144 rows × 8 columns

In [328]:
# Cross-tabulate cluster membership against the 'chosen' label, then show the
# counts as a stacked bar chart (one bar per cluster, split by chosen = 0/1).
stacked = (
    X.groupby(['chosen', 'Cluster'])
     .size()
     .reset_index()
)
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:, [0, 1]].plot.bar(stacked=True, figsize=(10, 7))
Out[328]:
<matplotlib.axes._subplots.AxesSubplot at 0x1b82c211b00>
In [329]:
# Render the current company's name as a level-2 markdown header in the output.
from IPython.display import display, Markdown, Latex
display(Markdown(f"## {companies[4]}"))

Specialized

ANN

In [217]:
# Feature matrix for company index 4 ("Specialized") — presumably the standardized
# tonal-centroid features (df_n_ps_std_tc); TODO confirm naming upstream.
X = df_n_ps_std_tc[4]
In [218]:
# Binary target: the 'chosen' column for the same company's tracks.
y = df_n_ps[4]['chosen']
In [219]:
# Default 75/25 split. NOTE(review): no random_state is set, so the split — and all
# results below — are not reproducible across kernel restarts.
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [220]:
# Sanity check: training partition shape (rows, features).
X_train.shape
Out[220]:
(164, 6)
In [221]:
# Base estimator for the grid search; hidden_layer_sizes here is only a placeholder —
# the parameter grid below overrides it.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [222]:
# Candidate hyper-parameter values for the MLP grid search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [
    (10,), (20,), (30,),                       # single hidden layer
    (10, 10), (20, 20), (30, 30), (20, 10),    # two hidden layers
    (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10),  # three hidden layers
]
# 0.001 .. 0.009 in steps of 0.001, plus 0.01 and 0.02.
learning_rate_init_vec = [i / 1000 for i in range(1, 10)] + [0.01, 0.02]
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [223]:
import time
start = time.time()  # current time in seconds since the Unix epoch (Jan 1, 1970) — reference point for timing the search

# Fix the RNG so the search is reproducible.
np.random.seed(1234)
# Parameter grid ('parametros' = Spanish for 'parameters'); batch_size is left
# commented out to keep the search tractable.
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Score every candidate on both Cohen's kappa and accuracy; refit on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): iid=True is deprecated and removed in scikit-learn 0.24 — drop it when upgrading.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [337]:
grid.fit(X_train, y_train)

# Report the winning hyper-parameters with their CV accuracy and kappa.
# (Message is in Spanish: "The best model's parameters were {0}, giving an
# Accuracy of {1}% and a Kappa of {2}".)
print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time()  # time right after the grid search finishes
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))  # "Total time: {0} minutes"
Los parámetros del mejor modelo fueron {'activation': 'tanh', 'hidden_layer_sizes': (20, 20), 'learning_rate_init': 0.01, 'max_iter': 300}, que permiten obtener un Accuracy de 68.29% y un Kappa del 36.07
Tiempo total: 25.13 minutos
In [224]:
# The grid search above takes ~25 minutes, so its winning parameters are hard-coded
# here to let the notebook re-run without repeating the search.
# NOTE(review): this overwrites a fitted-estimator attribute (grid.best_params_) —
# hacky, but it keeps the cells below unchanged.
grid.best_params_={'activation': 'tanh', 'hidden_layer_sizes': (20, 20), 'learning_rate_init': 0.01, 'max_iter': 300}
n0=X_train.shape[1]  # input dimension (number of features)
### hidden_layer_sizes
# Layer widths: the tuned hidden-layer sizes plus a single output unit.
# (list() replaces the original index-by-index copy loop.)
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)
lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [225]:
# Keras functional-API input layer matching the feature dimension.
input_tensor = Input(shape = (n0,))
In [226]:
# Build the hidden stack with the functional API: each Dense layer consumes the
# previous layer's output tensor; the widths come from the tuned sizes in ns[:-1].
hidden_outputs = [input_tensor]
for width in ns[:-1]:
    hidden_outputs.append(
        Dense(width, activation=grid.best_params_['activation'])(hidden_outputs[-1])
    )

# Single sigmoid unit (ns[-1] == 1) for binary classification.
classification_output = Dense(ns[-1], activation='sigmoid')(hidden_outputs[-1])
In [227]:
# Tie the input and output tensors into a trainable Model, and snapshot the
# initial (random) weights so later fits can restart from the same point.
model = Model(inputs=[input_tensor], outputs=[classification_output])
weights = model.get_weights()
In [228]:
# Architecture sanity check: layer output shapes and parameter counts.
model.summary()
Model: "model_11"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_12 (InputLayer)        (None, 6)                 0         
_________________________________________________________________
dense_31 (Dense)             (None, 20)                140       
_________________________________________________________________
dense_32 (Dense)             (None, 20)                420       
_________________________________________________________________
dense_33 (Dense)             (None, 1)                 21        
=================================================================
Total params: 581
Trainable params: 581
Non-trainable params: 0
_________________________________________________________________
In [229]:
# Restore the saved initial weights so this fit is independent of earlier runs.
model.set_weights(weights)

# Halve the learning rate whenever validation accuracy fails to improve by at
# least 0.01 for 10 consecutive epochs.
reduce_lr = keras.callbacks.ReduceLROnPlateau(
    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
)

# Adam with the grid-tuned learning rate; binary cross-entropy for the sigmoid output.
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(
    X_train, y_train,
    epochs=epochs,
    validation_data=(X_test, y_test),
    callbacks=[reduce_lr],
)
Train on 164 samples, validate on 55 samples
Epoch 1/300
164/164 [==============================] - 0s 1ms/step - loss: 0.7152 - accuracy: 0.4939 - val_loss: 0.6864 - val_accuracy: 0.6182
Epoch 2/300
164/164 [==============================] - 0s 98us/step - loss: 0.6924 - accuracy: 0.5793 - val_loss: 0.6894 - val_accuracy: 0.5455
Epoch 3/300
164/164 [==============================] - 0s 104us/step - loss: 0.6870 - accuracy: 0.5793 - val_loss: 0.6866 - val_accuracy: 0.6182
Epoch 4/300
164/164 [==============================] - 0s 116us/step - loss: 0.6788 - accuracy: 0.5488 - val_loss: 0.6931 - val_accuracy: 0.5818
Epoch 5/300
164/164 [==============================] - 0s 98us/step - loss: 0.6804 - accuracy: 0.5061 - val_loss: 0.6920 - val_accuracy: 0.5818
Epoch 6/300
164/164 [==============================] - 0s 91us/step - loss: 0.6784 - accuracy: 0.5244 - val_loss: 0.6955 - val_accuracy: 0.5818
Epoch 7/300
164/164 [==============================] - 0s 97us/step - loss: 0.6729 - accuracy: 0.5793 - val_loss: 0.6993 - val_accuracy: 0.5091
Epoch 8/300
164/164 [==============================] - 0s 97us/step - loss: 0.6683 - accuracy: 0.6159 - val_loss: 0.6967 - val_accuracy: 0.5818
Epoch 9/300
164/164 [==============================] - 0s 104us/step - loss: 0.6658 - accuracy: 0.5915 - val_loss: 0.6956 - val_accuracy: 0.6182
Epoch 10/300
164/164 [==============================] - 0s 116us/step - loss: 0.6646 - accuracy: 0.5793 - val_loss: 0.6990 - val_accuracy: 0.5818
Epoch 11/300
164/164 [==============================] - 0s 104us/step - loss: 0.6635 - accuracy: 0.6037 - val_loss: 0.6999 - val_accuracy: 0.5636

Epoch 00011: ReduceLROnPlateau reducing learning rate to 0.004999999888241291.
Epoch 12/300
164/164 [==============================] - 0s 116us/step - loss: 0.6595 - accuracy: 0.5793 - val_loss: 0.6960 - val_accuracy: 0.5818
Epoch 13/300
164/164 [==============================] - 0s 116us/step - loss: 0.6566 - accuracy: 0.6037 - val_loss: 0.6901 - val_accuracy: 0.6000
Epoch 14/300
164/164 [==============================] - 0s 110us/step - loss: 0.6541 - accuracy: 0.6159 - val_loss: 0.6938 - val_accuracy: 0.5636
Epoch 15/300
164/164 [==============================] - 0s 104us/step - loss: 0.6497 - accuracy: 0.6037 - val_loss: 0.6901 - val_accuracy: 0.5818
Epoch 16/300
164/164 [==============================] - 0s 104us/step - loss: 0.6474 - accuracy: 0.5976 - val_loss: 0.6848 - val_accuracy: 0.6000
Epoch 17/300
164/164 [==============================] - 0s 98us/step - loss: 0.6439 - accuracy: 0.6220 - val_loss: 0.6840 - val_accuracy: 0.6727
Epoch 18/300
164/164 [==============================] - 0s 110us/step - loss: 0.6450 - accuracy: 0.6341 - val_loss: 0.6861 - val_accuracy: 0.6545
Epoch 19/300
164/164 [==============================] - 0s 97us/step - loss: 0.6410 - accuracy: 0.6098 - val_loss: 0.6874 - val_accuracy: 0.5818
Epoch 20/300
164/164 [==============================] - 0s 104us/step - loss: 0.6380 - accuracy: 0.6524 - val_loss: 0.6909 - val_accuracy: 0.5455
Epoch 21/300
164/164 [==============================] - 0s 116us/step - loss: 0.6363 - accuracy: 0.6402 - val_loss: 0.6940 - val_accuracy: 0.5455
Epoch 22/300
164/164 [==============================] - 0s 134us/step - loss: 0.6313 - accuracy: 0.6524 - val_loss: 0.6876 - val_accuracy: 0.6364
Epoch 23/300
164/164 [==============================] - 0s 128us/step - loss: 0.6251 - accuracy: 0.6707 - val_loss: 0.6850 - val_accuracy: 0.6182
Epoch 24/300
164/164 [==============================] - 0s 134us/step - loss: 0.6238 - accuracy: 0.6707 - val_loss: 0.6887 - val_accuracy: 0.6182
Epoch 25/300
164/164 [==============================] - 0s 128us/step - loss: 0.6220 - accuracy: 0.6646 - val_loss: 0.6957 - val_accuracy: 0.6364
Epoch 26/300
164/164 [==============================] - 0s 134us/step - loss: 0.6164 - accuracy: 0.6768 - val_loss: 0.7036 - val_accuracy: 0.6000
Epoch 27/300
164/164 [==============================] - 0s 116us/step - loss: 0.6151 - accuracy: 0.7134 - val_loss: 0.7244 - val_accuracy: 0.6182

Epoch 00027: ReduceLROnPlateau reducing learning rate to 0.0024999999441206455.
Epoch 28/300
164/164 [==============================] - 0s 122us/step - loss: 0.6133 - accuracy: 0.6890 - val_loss: 0.7313 - val_accuracy: 0.6182
Epoch 29/300
164/164 [==============================] - 0s 116us/step - loss: 0.6162 - accuracy: 0.6829 - val_loss: 0.7357 - val_accuracy: 0.6182
Epoch 30/300
164/164 [==============================] - 0s 128us/step - loss: 0.6105 - accuracy: 0.7134 - val_loss: 0.7280 - val_accuracy: 0.6000
Epoch 31/300
164/164 [==============================] - 0s 122us/step - loss: 0.6073 - accuracy: 0.7073 - val_loss: 0.7206 - val_accuracy: 0.5455
Epoch 32/300
164/164 [==============================] - 0s 104us/step - loss: 0.6039 - accuracy: 0.7378 - val_loss: 0.7226 - val_accuracy: 0.5091
Epoch 33/300
164/164 [==============================] - 0s 104us/step - loss: 0.5988 - accuracy: 0.7317 - val_loss: 0.7280 - val_accuracy: 0.5091
Epoch 34/300
164/164 [==============================] - 0s 110us/step - loss: 0.5988 - accuracy: 0.7439 - val_loss: 0.7300 - val_accuracy: 0.5273
Epoch 35/300
164/164 [==============================] - 0s 104us/step - loss: 0.5953 - accuracy: 0.7378 - val_loss: 0.7286 - val_accuracy: 0.5818
Epoch 36/300
164/164 [==============================] - 0s 110us/step - loss: 0.5917 - accuracy: 0.7256 - val_loss: 0.7280 - val_accuracy: 0.6182
Epoch 37/300
164/164 [==============================] - 0s 110us/step - loss: 0.5916 - accuracy: 0.7256 - val_loss: 0.7269 - val_accuracy: 0.6182

Epoch 00037: ReduceLROnPlateau reducing learning rate to 0.0012499999720603228.
Epoch 38/300
164/164 [==============================] - 0s 98us/step - loss: 0.5880 - accuracy: 0.7195 - val_loss: 0.7256 - val_accuracy: 0.5455
Epoch 39/300
164/164 [==============================] - 0s 104us/step - loss: 0.5864 - accuracy: 0.7256 - val_loss: 0.7251 - val_accuracy: 0.5455
Epoch 40/300
164/164 [==============================] - 0s 104us/step - loss: 0.5853 - accuracy: 0.7256 - val_loss: 0.7253 - val_accuracy: 0.5273
Epoch 41/300
164/164 [==============================] - 0s 104us/step - loss: 0.5854 - accuracy: 0.7195 - val_loss: 0.7244 - val_accuracy: 0.5273
Epoch 42/300
164/164 [==============================] - 0s 97us/step - loss: 0.5828 - accuracy: 0.7134 - val_loss: 0.7246 - val_accuracy: 0.5636
Epoch 43/300
164/164 [==============================] - 0s 104us/step - loss: 0.5826 - accuracy: 0.7195 - val_loss: 0.7240 - val_accuracy: 0.5636
Epoch 44/300
164/164 [==============================] - 0s 98us/step - loss: 0.5819 - accuracy: 0.7134 - val_loss: 0.7220 - val_accuracy: 0.6364
Epoch 45/300
164/164 [==============================] - ETA: 0s - loss: 0.6176 - accuracy: 0.65 - 0s 128us/step - loss: 0.5803 - accuracy: 0.7256 - val_loss: 0.7210 - val_accuracy: 0.6364
Epoch 46/300
164/164 [==============================] - 0s 104us/step - loss: 0.5800 - accuracy: 0.7195 - val_loss: 0.7208 - val_accuracy: 0.5818
Epoch 47/300
164/164 [==============================] - 0s 104us/step - loss: 0.5783 - accuracy: 0.7195 - val_loss: 0.7210 - val_accuracy: 0.5818

Epoch 00047: ReduceLROnPlateau reducing learning rate to 0.0006249999860301614.
Epoch 48/300
164/164 [==============================] - 0s 110us/step - loss: 0.5772 - accuracy: 0.7378 - val_loss: 0.7217 - val_accuracy: 0.5818
Epoch 49/300
164/164 [==============================] - 0s 104us/step - loss: 0.5769 - accuracy: 0.7378 - val_loss: 0.7219 - val_accuracy: 0.6000
Epoch 50/300
164/164 [==============================] - 0s 91us/step - loss: 0.5770 - accuracy: 0.7378 - val_loss: 0.7218 - val_accuracy: 0.5818
Epoch 51/300
164/164 [==============================] - 0s 104us/step - loss: 0.5763 - accuracy: 0.7378 - val_loss: 0.7214 - val_accuracy: 0.6000
Epoch 52/300
164/164 [==============================] - 0s 104us/step - loss: 0.5762 - accuracy: 0.7256 - val_loss: 0.7220 - val_accuracy: 0.6000
Epoch 53/300
164/164 [==============================] - 0s 116us/step - loss: 0.5756 - accuracy: 0.7378 - val_loss: 0.7236 - val_accuracy: 0.6000
Epoch 54/300
164/164 [==============================] - 0s 116us/step - loss: 0.5746 - accuracy: 0.7439 - val_loss: 0.7252 - val_accuracy: 0.6000
Epoch 55/300
164/164 [==============================] - 0s 116us/step - loss: 0.5737 - accuracy: 0.7439 - val_loss: 0.7268 - val_accuracy: 0.5818
Epoch 56/300
164/164 [==============================] - 0s 110us/step - loss: 0.5733 - accuracy: 0.7439 - val_loss: 0.7271 - val_accuracy: 0.6000
Epoch 57/300
164/164 [==============================] - 0s 104us/step - loss: 0.5718 - accuracy: 0.7439 - val_loss: 0.7275 - val_accuracy: 0.6000

Epoch 00057: ReduceLROnPlateau reducing learning rate to 0.0003124999930150807.
Epoch 58/300
164/164 [==============================] - 0s 104us/step - loss: 0.5712 - accuracy: 0.7439 - val_loss: 0.7281 - val_accuracy: 0.6000
Epoch 59/300
164/164 [==============================] - 0s 110us/step - loss: 0.5708 - accuracy: 0.7378 - val_loss: 0.7285 - val_accuracy: 0.6000
Epoch 60/300
164/164 [==============================] - 0s 110us/step - loss: 0.5707 - accuracy: 0.7500 - val_loss: 0.7293 - val_accuracy: 0.5818
Epoch 61/300
164/164 [==============================] - 0s 104us/step - loss: 0.5704 - accuracy: 0.7561 - val_loss: 0.7298 - val_accuracy: 0.5818
Epoch 62/300
164/164 [==============================] - 0s 104us/step - loss: 0.5700 - accuracy: 0.7561 - val_loss: 0.7297 - val_accuracy: 0.6000
Epoch 63/300
164/164 [==============================] - 0s 110us/step - loss: 0.5696 - accuracy: 0.7500 - val_loss: 0.7296 - val_accuracy: 0.6000
Epoch 64/300
164/164 [==============================] - 0s 98us/step - loss: 0.5693 - accuracy: 0.7378 - val_loss: 0.7300 - val_accuracy: 0.5636
Epoch 65/300
164/164 [==============================] - 0s 104us/step - loss: 0.5691 - accuracy: 0.7439 - val_loss: 0.7303 - val_accuracy: 0.5455
Epoch 66/300
164/164 [==============================] - 0s 98us/step - loss: 0.5685 - accuracy: 0.7439 - val_loss: 0.7303 - val_accuracy: 0.5636
Epoch 67/300
164/164 [==============================] - 0s 98us/step - loss: 0.5687 - accuracy: 0.7317 - val_loss: 0.7304 - val_accuracy: 0.5636

Epoch 00067: ReduceLROnPlateau reducing learning rate to 0.00015624999650754035.
Epoch 68/300
164/164 [==============================] - 0s 104us/step - loss: 0.5681 - accuracy: 0.7317 - val_loss: 0.7303 - val_accuracy: 0.5636
Epoch 69/300
164/164 [==============================] - 0s 104us/step - loss: 0.5681 - accuracy: 0.7317 - val_loss: 0.7301 - val_accuracy: 0.5636
Epoch 70/300
164/164 [==============================] - 0s 98us/step - loss: 0.5680 - accuracy: 0.7317 - val_loss: 0.7302 - val_accuracy: 0.5636
Epoch 71/300
164/164 [==============================] - 0s 116us/step - loss: 0.5677 - accuracy: 0.7317 - val_loss: 0.7303 - val_accuracy: 0.5818
Epoch 72/300
164/164 [==============================] - 0s 104us/step - loss: 0.5676 - accuracy: 0.7317 - val_loss: 0.7305 - val_accuracy: 0.5636
Epoch 73/300
164/164 [==============================] - 0s 98us/step - loss: 0.5674 - accuracy: 0.7317 - val_loss: 0.7309 - val_accuracy: 0.5636
Epoch 74/300
164/164 [==============================] - 0s 98us/step - loss: 0.5671 - accuracy: 0.7317 - val_loss: 0.7312 - val_accuracy: 0.5636
Epoch 75/300
164/164 [==============================] - 0s 110us/step - loss: 0.5669 - accuracy: 0.7317 - val_loss: 0.7316 - val_accuracy: 0.5636
Epoch 76/300
164/164 [==============================] - 0s 104us/step - loss: 0.5668 - accuracy: 0.7317 - val_loss: 0.7318 - val_accuracy: 0.5636
Epoch 77/300
164/164 [==============================] - 0s 104us/step - loss: 0.5666 - accuracy: 0.7317 - val_loss: 0.7318 - val_accuracy: 0.5818

Epoch 00077: ReduceLROnPlateau reducing learning rate to 7.812499825377017e-05.
Epoch 78/300
164/164 [==============================] - 0s 110us/step - loss: 0.5665 - accuracy: 0.7317 - val_loss: 0.7320 - val_accuracy: 0.5818
Epoch 79/300
164/164 [==============================] - 0s 91us/step - loss: 0.5664 - accuracy: 0.7317 - val_loss: 0.7320 - val_accuracy: 0.5818
Epoch 80/300
164/164 [==============================] - 0s 104us/step - loss: 0.5663 - accuracy: 0.7317 - val_loss: 0.7320 - val_accuracy: 0.5818
Epoch 81/300
164/164 [==============================] - 0s 110us/step - loss: 0.5662 - accuracy: 0.7317 - val_loss: 0.7319 - val_accuracy: 0.5818
Epoch 82/300
164/164 [==============================] - 0s 98us/step - loss: 0.5661 - accuracy: 0.7317 - val_loss: 0.7321 - val_accuracy: 0.5818
Epoch 83/300
164/164 [==============================] - 0s 122us/step - loss: 0.5661 - accuracy: 0.7317 - val_loss: 0.7323 - val_accuracy: 0.5818
Epoch 84/300
164/164 [==============================] - 0s 110us/step - loss: 0.5660 - accuracy: 0.7256 - val_loss: 0.7324 - val_accuracy: 0.5818
Epoch 85/300
164/164 [==============================] - 0s 104us/step - loss: 0.5659 - accuracy: 0.7317 - val_loss: 0.7325 - val_accuracy: 0.5818
Epoch 86/300
164/164 [==============================] - 0s 110us/step - loss: 0.5659 - accuracy: 0.7378 - val_loss: 0.7325 - val_accuracy: 0.5818
Epoch 87/300
164/164 [==============================] - 0s 104us/step - loss: 0.5657 - accuracy: 0.7378 - val_loss: 0.7325 - val_accuracy: 0.5818

Epoch 00087: ReduceLROnPlateau reducing learning rate to 3.9062499126885086e-05.
Epoch 88/300
164/164 [==============================] - 0s 134us/step - loss: 0.5657 - accuracy: 0.7378 - val_loss: 0.7325 - val_accuracy: 0.5636
Epoch 89/300
164/164 [==============================] - 0s 140us/step - loss: 0.5656 - accuracy: 0.7378 - val_loss: 0.7325 - val_accuracy: 0.5636
Epoch 90/300
164/164 [==============================] - 0s 140us/step - loss: 0.5656 - accuracy: 0.7378 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 91/300
164/164 [==============================] - 0s 134us/step - loss: 0.5656 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 92/300
164/164 [==============================] - 0s 122us/step - loss: 0.5655 - accuracy: 0.7439 - val_loss: 0.7328 - val_accuracy: 0.5636
Epoch 93/300
164/164 [==============================] - 0s 122us/step - loss: 0.5655 - accuracy: 0.7439 - val_loss: 0.7328 - val_accuracy: 0.5636
Epoch 94/300
164/164 [==============================] - 0s 122us/step - loss: 0.5654 - accuracy: 0.7439 - val_loss: 0.7328 - val_accuracy: 0.5636
Epoch 95/300
164/164 [==============================] - 0s 122us/step - loss: 0.5653 - accuracy: 0.7439 - val_loss: 0.7328 - val_accuracy: 0.5636
Epoch 96/300
164/164 [==============================] - 0s 128us/step - loss: 0.5653 - accuracy: 0.7439 - val_loss: 0.7328 - val_accuracy: 0.5636
Epoch 97/300
164/164 [==============================] - 0s 122us/step - loss: 0.5652 - accuracy: 0.7439 - val_loss: 0.7328 - val_accuracy: 0.5636

Epoch 00097: ReduceLROnPlateau reducing learning rate to 1.9531249563442543e-05.
Epoch 98/300
164/164 [==============================] - 0s 128us/step - loss: 0.5652 - accuracy: 0.7439 - val_loss: 0.7328 - val_accuracy: 0.5636
Epoch 99/300
164/164 [==============================] - 0s 110us/step - loss: 0.5652 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 100/300
164/164 [==============================] - 0s 116us/step - loss: 0.5652 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 101/300
164/164 [==============================] - 0s 116us/step - loss: 0.5652 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 102/300
164/164 [==============================] - 0s 110us/step - loss: 0.5652 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 103/300
164/164 [==============================] - 0s 110us/step - loss: 0.5651 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 104/300
164/164 [==============================] - 0s 116us/step - loss: 0.5651 - accuracy: 0.7439 - val_loss: 0.7326 - val_accuracy: 0.5636
Epoch 105/300
164/164 [==============================] - 0s 134us/step - loss: 0.5651 - accuracy: 0.7439 - val_loss: 0.7326 - val_accuracy: 0.5636
Epoch 106/300
164/164 [==============================] - 0s 128us/step - loss: 0.5651 - accuracy: 0.7439 - val_loss: 0.7325 - val_accuracy: 0.5636
Epoch 107/300
164/164 [==============================] - 0s 122us/step - loss: 0.5651 - accuracy: 0.7439 - val_loss: 0.7325 - val_accuracy: 0.5636

Epoch 00107: ReduceLROnPlateau reducing learning rate to 9.765624781721272e-06.
Epoch 108/300
164/164 [==============================] - 0s 134us/step - loss: 0.5650 - accuracy: 0.7439 - val_loss: 0.7326 - val_accuracy: 0.5636
Epoch 109/300
164/164 [==============================] - 0s 134us/step - loss: 0.5650 - accuracy: 0.7439 - val_loss: 0.7326 - val_accuracy: 0.5636
Epoch 110/300
164/164 [==============================] - 0s 122us/step - loss: 0.5650 - accuracy: 0.7439 - val_loss: 0.7326 - val_accuracy: 0.5636
Epoch 111/300
164/164 [==============================] - 0s 140us/step - loss: 0.5650 - accuracy: 0.7439 - val_loss: 0.7326 - val_accuracy: 0.5636
Epoch 112/300
164/164 [==============================] - 0s 128us/step - loss: 0.5650 - accuracy: 0.7439 - val_loss: 0.7326 - val_accuracy: 0.5636
Epoch 113/300
164/164 [==============================] - 0s 140us/step - loss: 0.5650 - accuracy: 0.7439 - val_loss: 0.7326 - val_accuracy: 0.5636
Epoch 114/300
164/164 [==============================] - 0s 128us/step - loss: 0.5650 - accuracy: 0.7439 - val_loss: 0.7326 - val_accuracy: 0.5636
Epoch 115/300
164/164 [==============================] - 0s 165us/step - loss: 0.5650 - accuracy: 0.7439 - val_loss: 0.7326 - val_accuracy: 0.5636
Epoch 116/300
164/164 [==============================] - 0s 165us/step - loss: 0.5649 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 117/300
164/164 [==============================] - 0s 122us/step - loss: 0.5649 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636

Epoch 00117: ReduceLROnPlateau reducing learning rate to 4.882812390860636e-06.
Epoch 118/300
164/164 [==============================] - 0s 134us/step - loss: 0.5649 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 119/300
164/164 [==============================] - 0s 122us/step - loss: 0.5649 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 120/300
164/164 [==============================] - 0s 134us/step - loss: 0.5649 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 121/300
164/164 [==============================] - 0s 140us/step - loss: 0.5649 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 122/300
164/164 [==============================] - 0s 128us/step - loss: 0.5649 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 123/300
164/164 [==============================] - 0s 134us/step - loss: 0.5649 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 124/300
164/164 [==============================] - 0s 128us/step - loss: 0.5649 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 125/300
164/164 [==============================] - 0s 158us/step - loss: 0.5649 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 126/300
164/164 [==============================] - 0s 128us/step - loss: 0.5649 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 127/300
164/164 [==============================] - 0s 116us/step - loss: 0.5649 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636

Epoch 00127: ReduceLROnPlateau reducing learning rate to 2.441406195430318e-06.
Epoch 128/300
164/164 [==============================] - 0s 122us/step - loss: 0.5649 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 129/300
164/164 [==============================] - 0s 116us/step - loss: 0.5649 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 130/300
164/164 [==============================] - 0s 116us/step - loss: 0.5649 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 131/300
164/164 [==============================] - 0s 146us/step - loss: 0.5649 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 132/300
164/164 [==============================] - 0s 128us/step - loss: 0.5649 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 133/300
164/164 [==============================] - 0s 140us/step - loss: 0.5649 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 134/300
164/164 [==============================] - 0s 116us/step - loss: 0.5649 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 135/300
164/164 [==============================] - 0s 110us/step - loss: 0.5649 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 136/300
164/164 [==============================] - 0s 122us/step - loss: 0.5649 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 137/300
164/164 [==============================] - 0s 116us/step - loss: 0.5649 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636

Epoch 00137: ReduceLROnPlateau reducing learning rate to 1.220703097715159e-06.
Epoch 138/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 139/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 140/300
164/164 [==============================] - 0s 128us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 141/300
164/164 [==============================] - 0s 128us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 142/300
164/164 [==============================] - 0s 128us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 143/300
164/164 [==============================] - 0s 128us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 144/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 145/300
164/164 [==============================] - 0s 128us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 146/300
164/164 [==============================] - 0s 152us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 147/300
164/164 [==============================] - 0s 146us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636

Epoch 00147: ReduceLROnPlateau reducing learning rate to 6.103515488575795e-07.
Epoch 148/300
164/164 [==============================] - 0s 140us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 149/300
164/164 [==============================] - 0s 140us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 150/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 151/300
164/164 [==============================] - 0s 128us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 152/300
164/164 [==============================] - 0s 122us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 153/300
164/164 [==============================] - 0s 134us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 154/300
164/164 [==============================] - 0s 134us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 155/300
164/164 [==============================] - 0s 140us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 156/300
164/164 [==============================] - 0s 122us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 157/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636

Epoch 00157: ReduceLROnPlateau reducing learning rate to 3.0517577442878974e-07.
Epoch 158/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 159/300
164/164 [==============================] - 0s 122us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 160/300
164/164 [==============================] - 0s 134us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 161/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 162/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 163/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 164/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 165/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 166/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 167/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636

Epoch 00167: ReduceLROnPlateau reducing learning rate to 1.5258788721439487e-07.
Epoch 168/300
164/164 [==============================] - 0s 128us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 169/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 170/300
164/164 [==============================] - 0s 134us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 171/300
164/164 [==============================] - 0s 128us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 172/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 173/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 174/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 175/300
164/164 [==============================] - 0s 128us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 176/300
164/164 [==============================] - 0s 134us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 177/300
164/164 [==============================] - 0s 152us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636

Epoch 00177: ReduceLROnPlateau reducing learning rate to 7.629394360719743e-08.
Epoch 178/300
164/164 [==============================] - 0s 122us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 179/300
164/164 [==============================] - 0s 128us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 180/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 181/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 182/300
164/164 [==============================] - 0s 122us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 183/300
164/164 [==============================] - 0s 128us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 184/300
164/164 [==============================] - 0s 122us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 185/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 186/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 187/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636

Epoch 00187: ReduceLROnPlateau reducing learning rate to 3.814697180359872e-08.
Epoch 188/300
164/164 [==============================] - 0s 128us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 189/300
164/164 [==============================] - 0s 134us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 190/300
164/164 [==============================] - 0s 128us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 191/300
164/164 [==============================] - 0s 122us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 192/300
164/164 [==============================] - 0s 122us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 193/300
164/164 [==============================] - 0s 122us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 194/300
164/164 [==============================] - 0s 134us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 195/300
164/164 [==============================] - 0s 134us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 196/300
164/164 [==============================] - 0s 134us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 197/300
164/164 [==============================] - 0s 128us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636

Epoch 00197: ReduceLROnPlateau reducing learning rate to 1.907348590179936e-08.
Epoch 198/300
164/164 [==============================] - 0s 134us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 199/300
164/164 [==============================] - 0s 128us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 200/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 201/300
164/164 [==============================] - 0s 122us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 202/300
164/164 [==============================] - 0s 122us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 203/300
164/164 [==============================] - 0s 134us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 204/300
164/164 [==============================] - 0s 128us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 205/300
164/164 [==============================] - 0s 128us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 206/300
164/164 [==============================] - 0s 122us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 207/300
164/164 [==============================] - 0s 122us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636

Epoch 00207: ReduceLROnPlateau reducing learning rate to 9.53674295089968e-09.
Epoch 208/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 209/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 210/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 211/300
164/164 [==============================] - 0s 128us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 212/300
164/164 [==============================] - 0s 134us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 213/300
164/164 [==============================] - 0s 128us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 214/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 215/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 216/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 217/300
164/164 [==============================] - 0s 122us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636

Epoch 00217: ReduceLROnPlateau reducing learning rate to 4.76837147544984e-09.
Epoch 218/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 219/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 220/300
164/164 [==============================] - 0s 122us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 221/300
164/164 [==============================] - 0s 122us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 222/300
164/164 [==============================] - 0s 122us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 223/300
164/164 [==============================] - 0s 122us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 224/300
164/164 [==============================] - 0s 122us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 225/300
164/164 [==============================] - 0s 122us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 226/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 227/300
164/164 [==============================] - 0s 122us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636

Epoch 00227: ReduceLROnPlateau reducing learning rate to 2.38418573772492e-09.
Epoch 228/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 229/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 230/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 231/300
164/164 [==============================] - 0s 140us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 232/300
164/164 [==============================] - 0s 128us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 233/300
164/164 [==============================] - 0s 122us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 234/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 235/300
164/164 [==============================] - 0s 98us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 236/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 237/300
164/164 [==============================] - 0s 134us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636

Epoch 00237: ReduceLROnPlateau reducing learning rate to 1.19209286886246e-09.
Epoch 238/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 239/300
164/164 [==============================] - 0s 128us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 240/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 241/300
164/164 [==============================] - 0s 98us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 242/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 243/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 244/300
164/164 [==============================] - 0s 98us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 245/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 246/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 247/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636

Epoch 00247: ReduceLROnPlateau reducing learning rate to 5.9604643443123e-10.
Epoch 248/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 249/300
164/164 [==============================] - 0s 91us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 250/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 251/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 252/300
164/164 [==============================] - 0s 91us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 253/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 254/300
164/164 [==============================] - 0s 91us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 255/300
164/164 [==============================] - 0s 97us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 256/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 257/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636

Epoch 00257: ReduceLROnPlateau reducing learning rate to 2.98023217215615e-10.
Epoch 258/300
164/164 [==============================] - 0s 98us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 259/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 260/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 261/300
164/164 [==============================] - 0s 98us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 262/300
164/164 [==============================] - 0s 97us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 263/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 264/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 265/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 266/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 267/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636

Epoch 00267: ReduceLROnPlateau reducing learning rate to 1.490116086078075e-10.
Epoch 268/300
164/164 [==============================] - 0s 98us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 269/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 270/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 271/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 272/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 273/300
164/164 [==============================] - 0s 98us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 274/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 275/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 276/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 277/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636

Epoch 00277: ReduceLROnPlateau reducing learning rate to 7.450580430390374e-11.
Epoch 278/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 279/300
164/164 [==============================] - 0s 128us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 280/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 281/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 282/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 283/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 284/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 285/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 286/300
164/164 [==============================] - 0s 98us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 287/300
164/164 [==============================] - 0s 98us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636

Epoch 00287: ReduceLROnPlateau reducing learning rate to 3.725290215195187e-11.
Epoch 288/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 289/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 290/300
164/164 [==============================] - 0s 140us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 291/300
164/164 [==============================] - 0s 116us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 292/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 293/300
164/164 [==============================] - 0s 110us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 294/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 295/300
164/164 [==============================] - 0s 98us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 296/300
164/164 [==============================] - 0s 134us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 297/300
164/164 [==============================] - 0s 98us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636

Epoch 00297: ReduceLROnPlateau reducing learning rate to 1.8626451075975936e-11.
Epoch 298/300
164/164 [==============================] - 0s 98us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 299/300
164/164 [==============================] - 0s 104us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
Epoch 300/300
164/164 [==============================] - 0s 98us/step - loss: 0.5648 - accuracy: 0.7439 - val_loss: 0.7327 - val_accuracy: 0.5636
In [230]:
# Plot the training history: accuracy and loss curves, train vs. validation.
# (Removed a leftover debug `print(epochs)` that only echoed `range(0, 300)`.)
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
range(0, 300)
In [231]:
# Evaluate the trained network on the held-out test split and report both metrics.
test_loss, test_acc = model.evaluate(X_test, y_test)
print(f"test loss: {test_loss}, test accuracy: {test_acc}")
55/55 [==============================] - 0s 55us/step
test loss: 0.732729638706554, test accuracy: 0.5636363625526428
In [232]:
# Predicted positive-class probabilities for the test set, scored with ROC AUC.
y_pred = model.predict(X_test)
auc = roc_auc_score(y_test, y_pred)
print("AUC ROC: ", auc)
AUC ROC:  0.5621693121693122
In [233]:
# Binarize the probabilities at the 0.5 threshold, then report agreement metrics.
y_pred = [int(p >= 0.5) for p in y_pred]
print("Kappa: ", cohen_kappa_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
Kappa:  0.12350597609561753
[[19  9]
 [15 12]]

KMeans

In [348]:
# Display the standardized tonal-centroid feature matrix for inspection.
X
Out[348]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6
0 -1.035481 1.779354 1.874576 0.924814 -0.129662 2.608421
1 0.965487 -0.399971 -1.606069 0.008311 0.834341 0.513694
2 -0.141249 -1.969933 -0.960470 1.005123 -1.117123 -2.399517
3 -1.590590 -0.729741 -0.575342 0.587988 -0.885561 -0.752828
4 -0.391524 -0.894181 -0.426309 1.017585 -0.391173 -0.920259
5 -1.256622 -0.886861 -0.850243 0.516749 -0.491454 -0.072867
6 -1.579202 0.121365 -0.522749 1.012025 -0.547676 -0.140430
7 -1.760350 -0.182429 -0.008789 1.576085 -0.878841 0.252104
8 1.115526 1.555384 0.609404 0.558809 -0.514428 -0.221726
9 1.467291 1.402697 0.806896 -0.279535 0.939735 0.758333
10 0.972379 1.550575 -0.223468 0.899199 1.412818 0.386724
11 0.294385 0.890870 0.493531 0.142145 0.212432 1.463886
12 0.795134 0.176458 1.588747 -0.412034 -0.982878 -0.299581
13 0.694481 0.577820 0.319393 0.451356 0.219257 0.563199
14 1.169114 0.075245 -0.980006 1.330732 2.094068 1.785970
15 0.962642 0.380225 -1.261850 1.044019 1.339949 1.776870
16 1.352245 0.463507 -0.679184 0.941519 2.196602 1.991369
17 1.784002 -1.453636 -1.128885 -0.626496 0.399672 -0.605861
18 0.929212 -0.538274 -1.016394 -0.167176 0.557933 1.511009
19 1.199761 -0.727252 0.322239 -1.105069 -0.125311 -0.979170
20 -0.485056 0.796900 0.581966 1.884586 -0.705890 -0.725300
21 -0.547233 0.692440 -0.162284 2.025268 -0.631876 -0.337240
22 1.446103 -0.074850 -0.132752 -0.064117 -0.209506 -0.465551
23 -0.312063 0.030270 -1.160963 0.726155 -1.511552 -0.509175
24 1.175126 -0.143713 -0.522479 0.641015 0.500311 0.617748
25 -1.044292 -0.058933 -1.340279 -1.302246 1.751828 -0.815403
26 -0.849044 0.079838 -0.400536 -1.312330 1.498217 -0.550869
27 -0.730672 -0.326196 -0.478608 -0.832610 -0.556236 -0.653280
28 -0.380922 -0.892886 -0.555313 -0.113628 1.211258 -0.901155
29 -0.368302 -1.168844 -0.094765 -0.158075 1.016584 -1.274561
... ... ... ... ... ... ...
189 -1.023243 0.827082 0.695531 0.482823 -0.093190 -0.130945
190 1.643548 -0.570770 0.545333 -0.137189 0.295910 -0.891672
191 1.543182 -0.533850 0.979103 0.227528 0.216491 -0.016099
192 1.416929 -1.770555 0.592692 -1.546796 -0.112419 -0.017441
193 -1.336444 0.162214 -1.528887 1.340066 0.343647 -0.060973
194 -0.331197 -0.545328 0.449891 -2.242097 0.210220 1.299600
195 -0.991382 -0.378373 -0.215170 -2.818431 1.156878 -0.599042
196 0.827092 0.502299 0.219306 1.474834 0.577530 0.832676
197 0.976291 0.325663 -0.091820 0.723604 0.494609 0.610596
198 0.903378 0.857383 0.090549 0.948012 1.127442 0.927032
199 -1.135922 -0.217483 -0.201444 0.204262 -0.033230 -0.725561
200 -1.143077 -0.289624 -0.109440 0.093244 0.007101 -0.571608
201 -1.325584 -0.109383 -0.850284 -0.442939 0.518129 -0.996845
202 0.270878 1.568003 -0.899682 0.187348 -0.995623 0.436835
203 -0.010376 1.403657 -0.298654 0.126520 -0.803249 -0.284875
204 -0.149606 0.679408 -0.527828 0.145473 0.226461 0.232361
205 -1.281900 0.472582 2.041397 -0.186464 1.140780 -0.694445
206 -1.561361 0.699591 0.373931 0.512801 0.245563 -1.259098
207 -0.548022 0.646014 -0.015758 -0.364427 1.106060 -0.395692
208 -0.689835 0.729721 0.242422 0.167324 -0.269920 0.625568
209 -1.182263 0.898528 0.655331 1.146978 -0.973699 0.509883
210 -0.465862 0.576977 -0.088421 1.290934 0.648005 0.669298
211 -0.265321 1.252143 0.230904 0.383047 -0.920749 0.237760
212 0.205358 1.300786 0.929349 -0.432002 -0.464366 -0.242135
213 -0.025600 0.467818 0.261063 -1.437444 -0.391460 -0.995280
214 -1.082557 1.025513 2.276661 1.056731 0.361540 1.291351
215 -1.297371 1.948703 2.264684 1.377703 1.194669 1.983124
216 -0.926424 0.162164 1.016687 1.945841 -1.341651 0.150826
217 -1.375041 -0.362757 -0.599873 1.478900 -0.021584 -0.846072
218 -0.974264 0.740461 0.889462 0.014997 1.024334 -0.992000

219 rows × 6 columns

In [349]:
# Elbow method: within-cluster sum of squares (KMeans inertia) for k = 1..14.
def _inertia_for(k):
    # One fit per candidate k; fixed random_state keeps the runs reproducible.
    return KMeans(n_clusters=k, random_state=0).fit(X).inertia_

WSSs = [_inertia_for(k) for k in range(1, 15)]
WSSs
Out[349]:
[1314.0,
 1103.6617898421102,
 933.9046374976435,
 830.0952355796812,
 752.8157274494505,
 696.8283563577859,
 641.355058887789,
 599.6834692450786,
 558.0899857646746,
 538.6016435622136,
 502.540180641064,
 477.03865333096127,
 457.1745404655215,
 443.08717934712786]
In [350]:
# Elbow plot of the inertia values; look for the "knee" to choose k.
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)
Out[350]:
[<matplotlib.lines.Line2D at 0x1b82c660da0>]

K=3

In [351]:
# Fit K-Means with k=3, chosen from the elbow plot above.
kmeans_tc = KMeans(n_clusters=3, random_state=0, n_init=10)
kmeans_tc.fit(X)
Out[351]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=3, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [352]:
# Cluster assignment of each training sample.
kmeans_tc.labels_
Out[352]:
array([2, 2, 0, 1, 0, 1, 1, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 0, 2, 0, 2, 2,
       0, 2, 2, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1,
       2, 2, 1, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 2, 0, 2, 1, 1, 2, 2, 2,
       0, 0, 0, 0, 1, 0, 2, 2, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 2, 0, 2, 0,
       2, 1, 1, 2, 2, 1, 2, 2, 2, 0, 0, 0, 1, 1, 0, 1, 1, 1, 2, 1, 1, 0,
       2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2,
       2, 0, 0, 0, 2, 0, 0, 2, 1, 0, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
       0, 0, 2, 0, 0, 0, 0, 0, 2, 2, 0, 2, 0, 0, 1, 1, 1, 1, 1, 1, 2, 1,
       1, 0, 0, 0, 2, 2, 2, 1, 1, 1, 1, 1, 1, 2, 0, 0, 0, 1, 0, 1, 2, 2,
       2, 1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 1, 1])
In [353]:
# Cluster assignments for every sample. For a fitted KMeans, predicting on the
# training data returns the same assignments already stored in `labels_`
# (each point is assigned to its nearest final centroid), so reuse them
# instead of recomputing all point-to-centroid distances.
clusters_tc = kmeans_tc.labels_
clusters_tc
Out[353]:
array([2, 2, 0, 1, 0, 1, 1, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 0, 2, 0, 2, 2,
       0, 2, 2, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1,
       2, 2, 1, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 2, 0, 2, 1, 1, 2, 2, 2,
       0, 0, 0, 0, 1, 0, 2, 2, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 2, 0, 2, 0,
       2, 1, 1, 2, 2, 1, 2, 2, 2, 0, 0, 0, 1, 1, 0, 1, 1, 1, 2, 1, 1, 0,
       2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2,
       2, 0, 0, 0, 2, 0, 0, 2, 1, 0, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
       0, 0, 2, 0, 0, 0, 0, 0, 2, 2, 0, 2, 0, 0, 1, 1, 1, 1, 1, 1, 2, 1,
       1, 0, 0, 0, 2, 2, 2, 1, 1, 1, 1, 1, 1, 2, 0, 0, 0, 1, 0, 1, 2, 2,
       2, 1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 1, 1])
In [354]:
# Attach the K-Means cluster assignment and the target label to the feature frame.
# `list(y)` drops y's index, so the second assignment is positional rather than
# index-aligned — presumably intentional, since y comes from a differently-indexed
# frame; verify against the caller.
X.loc[:,'Cluster'] = clusters_tc
X.loc[:,'chosen'] = list(y)
In [355]:
X
Out[355]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6 Cluster chosen
0 -1.035481 1.779354 1.874576 0.924814 -0.129662 2.608421 2 0
1 0.965487 -0.399971 -1.606069 0.008311 0.834341 0.513694 2 0
2 -0.141249 -1.969933 -0.960470 1.005123 -1.117123 -2.399517 0 0
3 -1.590590 -0.729741 -0.575342 0.587988 -0.885561 -0.752828 1 0
4 -0.391524 -0.894181 -0.426309 1.017585 -0.391173 -0.920259 0 0
5 -1.256622 -0.886861 -0.850243 0.516749 -0.491454 -0.072867 1 0
6 -1.579202 0.121365 -0.522749 1.012025 -0.547676 -0.140430 1 0
7 -1.760350 -0.182429 -0.008789 1.576085 -0.878841 0.252104 2 0
8 1.115526 1.555384 0.609404 0.558809 -0.514428 -0.221726 2 0
9 1.467291 1.402697 0.806896 -0.279535 0.939735 0.758333 2 0
10 0.972379 1.550575 -0.223468 0.899199 1.412818 0.386724 2 0
11 0.294385 0.890870 0.493531 0.142145 0.212432 1.463886 2 0
12 0.795134 0.176458 1.588747 -0.412034 -0.982878 -0.299581 0 0
13 0.694481 0.577820 0.319393 0.451356 0.219257 0.563199 2 0
14 1.169114 0.075245 -0.980006 1.330732 2.094068 1.785970 2 0
15 0.962642 0.380225 -1.261850 1.044019 1.339949 1.776870 2 0
16 1.352245 0.463507 -0.679184 0.941519 2.196602 1.991369 2 0
17 1.784002 -1.453636 -1.128885 -0.626496 0.399672 -0.605861 0 0
18 0.929212 -0.538274 -1.016394 -0.167176 0.557933 1.511009 2 0
19 1.199761 -0.727252 0.322239 -1.105069 -0.125311 -0.979170 0 0
20 -0.485056 0.796900 0.581966 1.884586 -0.705890 -0.725300 2 0
21 -0.547233 0.692440 -0.162284 2.025268 -0.631876 -0.337240 2 0
22 1.446103 -0.074850 -0.132752 -0.064117 -0.209506 -0.465551 0 0
23 -0.312063 0.030270 -1.160963 0.726155 -1.511552 -0.509175 2 0
24 1.175126 -0.143713 -0.522479 0.641015 0.500311 0.617748 2 0
25 -1.044292 -0.058933 -1.340279 -1.302246 1.751828 -0.815403 1 0
26 -0.849044 0.079838 -0.400536 -1.312330 1.498217 -0.550869 1 0
27 -0.730672 -0.326196 -0.478608 -0.832610 -0.556236 -0.653280 1 0
28 -0.380922 -0.892886 -0.555313 -0.113628 1.211258 -0.901155 1 0
29 -0.368302 -1.168844 -0.094765 -0.158075 1.016584 -1.274561 1 0
... ... ... ... ... ... ... ... ...
189 -1.023243 0.827082 0.695531 0.482823 -0.093190 -0.130945 2 1
190 1.643548 -0.570770 0.545333 -0.137189 0.295910 -0.891672 0 1
191 1.543182 -0.533850 0.979103 0.227528 0.216491 -0.016099 0 1
192 1.416929 -1.770555 0.592692 -1.546796 -0.112419 -0.017441 0 1
193 -1.336444 0.162214 -1.528887 1.340066 0.343647 -0.060973 1 1
194 -0.331197 -0.545328 0.449891 -2.242097 0.210220 1.299600 0 1
195 -0.991382 -0.378373 -0.215170 -2.818431 1.156878 -0.599042 1 1
196 0.827092 0.502299 0.219306 1.474834 0.577530 0.832676 2 1
197 0.976291 0.325663 -0.091820 0.723604 0.494609 0.610596 2 1
198 0.903378 0.857383 0.090549 0.948012 1.127442 0.927032 2 1
199 -1.135922 -0.217483 -0.201444 0.204262 -0.033230 -0.725561 1 1
200 -1.143077 -0.289624 -0.109440 0.093244 0.007101 -0.571608 1 1
201 -1.325584 -0.109383 -0.850284 -0.442939 0.518129 -0.996845 1 1
202 0.270878 1.568003 -0.899682 0.187348 -0.995623 0.436835 2 1
203 -0.010376 1.403657 -0.298654 0.126520 -0.803249 -0.284875 2 1
204 -0.149606 0.679408 -0.527828 0.145473 0.226461 0.232361 2 1
205 -1.281900 0.472582 2.041397 -0.186464 1.140780 -0.694445 1 1
206 -1.561361 0.699591 0.373931 0.512801 0.245563 -1.259098 1 1
207 -0.548022 0.646014 -0.015758 -0.364427 1.106060 -0.395692 1 1
208 -0.689835 0.729721 0.242422 0.167324 -0.269920 0.625568 2 1
209 -1.182263 0.898528 0.655331 1.146978 -0.973699 0.509883 2 1
210 -0.465862 0.576977 -0.088421 1.290934 0.648005 0.669298 2 1
211 -0.265321 1.252143 0.230904 0.383047 -0.920749 0.237760 2 1
212 0.205358 1.300786 0.929349 -0.432002 -0.464366 -0.242135 2 1
213 -0.025600 0.467818 0.261063 -1.437444 -0.391460 -0.995280 1 1
214 -1.082557 1.025513 2.276661 1.056731 0.361540 1.291351 2 1
215 -1.297371 1.948703 2.264684 1.377703 1.194669 1.983124 2 1
216 -0.926424 0.162164 1.016687 1.945841 -1.341651 0.150826 2 1
217 -1.375041 -0.362757 -0.599873 1.478900 -0.021584 -0.846072 1 1
218 -0.974264 0.740461 0.889462 0.014997 1.024334 -0.992000 1 1

219 rows × 8 columns

In [356]:
# Cross-tabulate cluster membership against the target, then draw a stacked
# bar chart: one bar per cluster, split into chosen = 0 / 1 segments.
counts = X.groupby(['chosen', 'Cluster']).size().reset_index()
pivot_df = counts.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:, [0, 1]].plot.bar(stacked=True, figsize=(10, 7))
Out[356]:
<matplotlib.axes._subplots.AxesSubplot at 0x1b82c6ae978>
In [357]:
# Render the company name as a Markdown level-2 header in the cell output.
from IPython.display import display, Markdown, Latex
display(Markdown('## '+companies[5]))

Urban Place

ANN

In [234]:
# Standardized tonal-centroid features for company index 5 (Urban Place).
X = df_n_ps_std_tc[5]
In [235]:
# Binary target: the 'chosen' flag for the same company's playlist.
y = df_n_ps[5]['chosen']
In [236]:
# Train/test split with sklearn defaults (25% test). No random_state is set,
# so the split differs between runs — NOTE(review): consider seeding for
# reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [237]:
# Sanity check: number of training samples and features.
X_train.shape
Out[237]:
(162, 6)
In [238]:
# Base MLP estimator; its hyperparameters are tuned by the grid search below.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [239]:
# Candidate hyperparameter values for the MLP grid search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]  # defined but excluded from the grid below
In [240]:
# Grid-search setup: 5-fold CV over the hyperparameter grids defined above,
# scored on both Cohen's kappa and accuracy, refit on accuracy.
import time
start = time.time() # current time in seconds since the Unix epoch — used to time the search

np.random.seed(1234)  # fixed seed for reproducibility of the search
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): the `iid` parameter was deprecated in scikit-learn 0.22 and
# removed in 0.24 — this call will break on newer sklearn versions.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [365]:
# Run the grid search (slow: ~28 minutes per the recorded output) and report
# the best configuration with its accuracy and kappa scores.
grid.fit(X_train, y_train)

print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # time after the model search has finished
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'tanh', 'hidden_layer_sizes': (30, 20, 10), 'learning_rate_init': 0.01, 'max_iter': 300}, que permiten obtener un Accuracy de 77.78% y un Kappa del 47.35
Tiempo total: 27.94 minutos
In [241]:
# Hard-code the best hyperparameters found by the (expensive) grid search so
# the notebook can be re-run without repeating the ~28-minute fit, then unpack
# them into the values the Keras model below needs.
grid.best_params_ = {'activation': 'tanh', 'hidden_layer_sizes': (30, 20, 10), 'learning_rate_init': 0.01, 'max_iter': 300}

n0 = X_train.shape[1]  # input dimension

# Layer widths: the tuned hidden sizes plus a single-unit output layer.
# (Replaces a manual index loop that re-implemented list().)
ns = list(grid.best_params_['hidden_layer_sizes']) + [1]

lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [242]:
# Keras functional-API input layer matching the feature dimension.
input_tensor = Input(shape = (n0,))
In [243]:
# Stack the tuned hidden layers on top of the input, then add a single
# sigmoid unit for binary classification.
layer_out = input_tensor
for units in ns[:-1]:
    layer_out = Dense(units, activation=grid.best_params_['activation'])(layer_out)

classification_output = Dense(ns[-1], activation='sigmoid')(layer_out)
In [244]:
# Build the model and snapshot its freshly-initialized weights so training
# can later be restarted from the same initialization.
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [245]:
# Architecture overview: layer shapes and parameter counts.
model.summary()
Model: "model_12"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_13 (InputLayer)        (None, 6)                 0         
_________________________________________________________________
dense_34 (Dense)             (None, 30)                210       
_________________________________________________________________
dense_35 (Dense)             (None, 20)                620       
_________________________________________________________________
dense_36 (Dense)             (None, 10)                210       
_________________________________________________________________
dense_37 (Dense)             (None, 1)                 11        
=================================================================
Total params: 1,051
Trainable params: 1,051
Non-trainable params: 0
_________________________________________________________________
In [246]:
# Train with Adam at the tuned learning rate, halving the LR whenever
# validation accuracy fails to improve by at least 0.01 for 10 epochs.
model.set_weights(weights)  # reset to the saved initial weights
adam = keras.optimizers.Adam(lr=lr)  # NOTE(review): `lr` is deprecated in tf.keras 2.x in favor of `learning_rate`
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), 
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 162 samples, validate on 54 samples
Epoch 1/300
162/162 [==============================] - 0s 3ms/step - loss: 0.6994 - accuracy: 0.5370 - val_loss: 0.5857 - val_accuracy: 0.6852
Epoch 2/300
162/162 [==============================] - 0s 121us/step - loss: 0.6278 - accuracy: 0.6914 - val_loss: 0.5539 - val_accuracy: 0.7037
Epoch 3/300
162/162 [==============================] - 0s 117us/step - loss: 0.6073 - accuracy: 0.7099 - val_loss: 0.5637 - val_accuracy: 0.7407
Epoch 4/300
162/162 [==============================] - 0s 117us/step - loss: 0.5976 - accuracy: 0.6852 - val_loss: 0.5488 - val_accuracy: 0.7407
Epoch 5/300
162/162 [==============================] - 0s 111us/step - loss: 0.5667 - accuracy: 0.7037 - val_loss: 0.5419 - val_accuracy: 0.7593
Epoch 6/300
162/162 [==============================] - 0s 142us/step - loss: 0.5646 - accuracy: 0.7284 - val_loss: 0.5491 - val_accuracy: 0.7778
Epoch 7/300
162/162 [==============================] - 0s 111us/step - loss: 0.5514 - accuracy: 0.7284 - val_loss: 0.5498 - val_accuracy: 0.7593
Epoch 8/300
162/162 [==============================] - 0s 117us/step - loss: 0.5258 - accuracy: 0.7346 - val_loss: 0.5469 - val_accuracy: 0.7037
Epoch 9/300
162/162 [==============================] - 0s 142us/step - loss: 0.5067 - accuracy: 0.7284 - val_loss: 0.5490 - val_accuracy: 0.7037
Epoch 10/300
162/162 [==============================] - 0s 123us/step - loss: 0.4963 - accuracy: 0.7469 - val_loss: 0.5583 - val_accuracy: 0.6852
Epoch 11/300
162/162 [==============================] - 0s 136us/step - loss: 0.4891 - accuracy: 0.7469 - val_loss: 0.5560 - val_accuracy: 0.7037
Epoch 12/300
162/162 [==============================] - 0s 123us/step - loss: 0.4661 - accuracy: 0.7901 - val_loss: 0.5340 - val_accuracy: 0.6852
Epoch 13/300
162/162 [==============================] - 0s 142us/step - loss: 0.4753 - accuracy: 0.7716 - val_loss: 0.5548 - val_accuracy: 0.7593
Epoch 14/300
162/162 [==============================] - 0s 111us/step - loss: 0.4658 - accuracy: 0.7716 - val_loss: 0.5878 - val_accuracy: 0.6667
Epoch 15/300
162/162 [==============================] - 0s 105us/step - loss: 0.4630 - accuracy: 0.8025 - val_loss: 0.5785 - val_accuracy: 0.7593
Epoch 16/300
162/162 [==============================] - 0s 117us/step - loss: 0.4482 - accuracy: 0.8333 - val_loss: 0.5752 - val_accuracy: 0.7407

Epoch 00016: ReduceLROnPlateau reducing learning rate to 0.004999999888241291.
Epoch 17/300
162/162 [==============================] - 0s 142us/step - loss: 0.4313 - accuracy: 0.8580 - val_loss: 0.5693 - val_accuracy: 0.7407
Epoch 18/300
162/162 [==============================] - 0s 130us/step - loss: 0.4242 - accuracy: 0.8519 - val_loss: 0.5612 - val_accuracy: 0.7593
Epoch 19/300
162/162 [==============================] - 0s 123us/step - loss: 0.4176 - accuracy: 0.8519 - val_loss: 0.5413 - val_accuracy: 0.7778
Epoch 20/300
162/162 [==============================] - 0s 111us/step - loss: 0.4122 - accuracy: 0.8580 - val_loss: 0.5434 - val_accuracy: 0.7593
Epoch 21/300
162/162 [==============================] - 0s 117us/step - loss: 0.4078 - accuracy: 0.8148 - val_loss: 0.5756 - val_accuracy: 0.7593
Epoch 22/300
162/162 [==============================] - 0s 117us/step - loss: 0.4034 - accuracy: 0.8272 - val_loss: 0.5847 - val_accuracy: 0.7593
Epoch 23/300
162/162 [==============================] - 0s 123us/step - loss: 0.3991 - accuracy: 0.8272 - val_loss: 0.5888 - val_accuracy: 0.7407
Epoch 24/300
162/162 [==============================] - 0s 136us/step - loss: 0.3897 - accuracy: 0.8395 - val_loss: 0.5933 - val_accuracy: 0.7407
Epoch 25/300
162/162 [==============================] - 0s 136us/step - loss: 0.3781 - accuracy: 0.8395 - val_loss: 0.5867 - val_accuracy: 0.7407
Epoch 26/300
162/162 [==============================] - 0s 123us/step - loss: 0.3710 - accuracy: 0.8457 - val_loss: 0.5836 - val_accuracy: 0.7407

Epoch 00026: ReduceLROnPlateau reducing learning rate to 0.0024999999441206455.
Epoch 27/300
162/162 [==============================] - 0s 123us/step - loss: 0.3735 - accuracy: 0.8519 - val_loss: 0.5783 - val_accuracy: 0.7407
Epoch 28/300
162/162 [==============================] - 0s 111us/step - loss: 0.3708 - accuracy: 0.8519 - val_loss: 0.5724 - val_accuracy: 0.7222
Epoch 29/300
162/162 [==============================] - 0s 111us/step - loss: 0.3653 - accuracy: 0.8580 - val_loss: 0.5714 - val_accuracy: 0.7222
Epoch 30/300
162/162 [==============================] - 0s 117us/step - loss: 0.3590 - accuracy: 0.8580 - val_loss: 0.5768 - val_accuracy: 0.7407
Epoch 31/300
162/162 [==============================] - 0s 123us/step - loss: 0.3586 - accuracy: 0.8457 - val_loss: 0.5806 - val_accuracy: 0.7407
Epoch 32/300
162/162 [==============================] - 0s 117us/step - loss: 0.3529 - accuracy: 0.8457 - val_loss: 0.5622 - val_accuracy: 0.7407
Epoch 33/300
162/162 [==============================] - 0s 130us/step - loss: 0.3488 - accuracy: 0.8580 - val_loss: 0.5681 - val_accuracy: 0.7407
Epoch 34/300
162/162 [==============================] - 0s 123us/step - loss: 0.3495 - accuracy: 0.8580 - val_loss: 0.5797 - val_accuracy: 0.7222
Epoch 35/300
162/162 [==============================] - 0s 130us/step - loss: 0.3475 - accuracy: 0.8642 - val_loss: 0.5836 - val_accuracy: 0.7037
Epoch 36/300
162/162 [==============================] - 0s 117us/step - loss: 0.3438 - accuracy: 0.8704 - val_loss: 0.5841 - val_accuracy: 0.7037

Epoch 00036: ReduceLROnPlateau reducing learning rate to 0.0012499999720603228.
Epoch 37/300
162/162 [==============================] - 0s 130us/step - loss: 0.3412 - accuracy: 0.8580 - val_loss: 0.5847 - val_accuracy: 0.7222
Epoch 38/300
162/162 [==============================] - 0s 154us/step - loss: 0.3390 - accuracy: 0.8580 - val_loss: 0.5848 - val_accuracy: 0.7037
Epoch 39/300
162/162 [==============================] - 0s 117us/step - loss: 0.3362 - accuracy: 0.8580 - val_loss: 0.5841 - val_accuracy: 0.7037
Epoch 40/300
162/162 [==============================] - 0s 123us/step - loss: 0.3376 - accuracy: 0.8642 - val_loss: 0.5855 - val_accuracy: 0.6852
Epoch 41/300
162/162 [==============================] - 0s 123us/step - loss: 0.3354 - accuracy: 0.8765 - val_loss: 0.5800 - val_accuracy: 0.7222
Epoch 42/300
162/162 [==============================] - 0s 123us/step - loss: 0.3329 - accuracy: 0.8827 - val_loss: 0.5728 - val_accuracy: 0.7407
Epoch 43/300
162/162 [==============================] - 0s 130us/step - loss: 0.3278 - accuracy: 0.8704 - val_loss: 0.5693 - val_accuracy: 0.7222
Epoch 44/300
162/162 [==============================] - 0s 136us/step - loss: 0.3273 - accuracy: 0.8519 - val_loss: 0.5649 - val_accuracy: 0.7222
Epoch 45/300
162/162 [==============================] - 0s 117us/step - loss: 0.3275 - accuracy: 0.8519 - val_loss: 0.5619 - val_accuracy: 0.7222
Epoch 46/300
162/162 [==============================] - 0s 111us/step - loss: 0.3288 - accuracy: 0.8519 - val_loss: 0.5581 - val_accuracy: 0.7407

Epoch 00046: ReduceLROnPlateau reducing learning rate to 0.0006249999860301614.
Epoch 47/300
162/162 [==============================] - 0s 117us/step - loss: 0.3272 - accuracy: 0.8704 - val_loss: 0.5590 - val_accuracy: 0.7407
Epoch 48/300
162/162 [==============================] - 0s 117us/step - loss: 0.3253 - accuracy: 0.8642 - val_loss: 0.5601 - val_accuracy: 0.7407
Epoch 49/300
162/162 [==============================] - 0s 136us/step - loss: 0.3240 - accuracy: 0.8642 - val_loss: 0.5606 - val_accuracy: 0.7407
Epoch 50/300
162/162 [==============================] - 0s 136us/step - loss: 0.3226 - accuracy: 0.8642 - val_loss: 0.5603 - val_accuracy: 0.7407
Epoch 51/300
162/162 [==============================] - 0s 123us/step - loss: 0.3212 - accuracy: 0.8704 - val_loss: 0.5588 - val_accuracy: 0.7407
Epoch 52/300
162/162 [==============================] - 0s 130us/step - loss: 0.3204 - accuracy: 0.8704 - val_loss: 0.5586 - val_accuracy: 0.7407
Epoch 53/300
162/162 [==============================] - 0s 123us/step - loss: 0.3192 - accuracy: 0.8765 - val_loss: 0.5590 - val_accuracy: 0.7407
Epoch 54/300
162/162 [==============================] - 0s 123us/step - loss: 0.3185 - accuracy: 0.8704 - val_loss: 0.5602 - val_accuracy: 0.7407
Epoch 55/300
162/162 [==============================] - 0s 130us/step - loss: 0.3179 - accuracy: 0.8765 - val_loss: 0.5596 - val_accuracy: 0.7407
Epoch 56/300
162/162 [==============================] - 0s 117us/step - loss: 0.3171 - accuracy: 0.8827 - val_loss: 0.5607 - val_accuracy: 0.7407

Epoch 00056: ReduceLROnPlateau reducing learning rate to 0.0003124999930150807.
Epoch 57/300
162/162 [==============================] - 0s 148us/step - loss: 0.3161 - accuracy: 0.8827 - val_loss: 0.5605 - val_accuracy: 0.7407
Epoch 58/300
162/162 [==============================] - 0s 117us/step - loss: 0.3155 - accuracy: 0.8827 - val_loss: 0.5603 - val_accuracy: 0.7407
Epoch 59/300
162/162 [==============================] - 0s 123us/step - loss: 0.3152 - accuracy: 0.8827 - val_loss: 0.5610 - val_accuracy: 0.7407
Epoch 60/300
162/162 [==============================] - 0s 142us/step - loss: 0.3146 - accuracy: 0.8827 - val_loss: 0.5610 - val_accuracy: 0.7593
Epoch 61/300
162/162 [==============================] - 0s 148us/step - loss: 0.3144 - accuracy: 0.8827 - val_loss: 0.5607 - val_accuracy: 0.7593
Epoch 62/300
162/162 [==============================] - 0s 123us/step - loss: 0.3142 - accuracy: 0.8765 - val_loss: 0.5605 - val_accuracy: 0.7593
Epoch 63/300
162/162 [==============================] - 0s 117us/step - loss: 0.3138 - accuracy: 0.8827 - val_loss: 0.5607 - val_accuracy: 0.7593
Epoch 64/300
162/162 [==============================] - 0s 117us/step - loss: 0.3134 - accuracy: 0.8765 - val_loss: 0.5618 - val_accuracy: 0.7593
Epoch 65/300
162/162 [==============================] - 0s 117us/step - loss: 0.3128 - accuracy: 0.8765 - val_loss: 0.5650 - val_accuracy: 0.7407
Epoch 66/300
162/162 [==============================] - 0s 111us/step - loss: 0.3126 - accuracy: 0.8765 - val_loss: 0.5671 - val_accuracy: 0.7407

Epoch 00066: ReduceLROnPlateau reducing learning rate to 0.00015624999650754035.
Epoch 67/300
162/162 [==============================] - 0s 123us/step - loss: 0.3121 - accuracy: 0.8765 - val_loss: 0.5679 - val_accuracy: 0.7407
Epoch 68/300
162/162 [==============================] - 0s 130us/step - loss: 0.3119 - accuracy: 0.8765 - val_loss: 0.5684 - val_accuracy: 0.7407
Epoch 69/300
162/162 [==============================] - 0s 123us/step - loss: 0.3117 - accuracy: 0.8765 - val_loss: 0.5687 - val_accuracy: 0.7407
Epoch 70/300
162/162 [==============================] - 0s 136us/step - loss: 0.3116 - accuracy: 0.8765 - val_loss: 0.5692 - val_accuracy: 0.7407
Epoch 71/300
162/162 [==============================] - 0s 117us/step - loss: 0.3115 - accuracy: 0.8765 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 72/300
162/162 [==============================] - 0s 142us/step - loss: 0.3113 - accuracy: 0.8765 - val_loss: 0.5691 - val_accuracy: 0.7407
Epoch 73/300
162/162 [==============================] - 0s 142us/step - loss: 0.3111 - accuracy: 0.8765 - val_loss: 0.5689 - val_accuracy: 0.7407
Epoch 74/300
162/162 [==============================] - 0s 117us/step - loss: 0.3109 - accuracy: 0.8765 - val_loss: 0.5688 - val_accuracy: 0.7407
Epoch 75/300
162/162 [==============================] - 0s 117us/step - loss: 0.3105 - accuracy: 0.8765 - val_loss: 0.5689 - val_accuracy: 0.7407
Epoch 76/300
162/162 [==============================] - 0s 111us/step - loss: 0.3103 - accuracy: 0.8765 - val_loss: 0.5692 - val_accuracy: 0.7407

Epoch 00076: ReduceLROnPlateau reducing learning rate to 7.812499825377017e-05.
Epoch 77/300
162/162 [==============================] - 0s 148us/step - loss: 0.3101 - accuracy: 0.8765 - val_loss: 0.5694 - val_accuracy: 0.7407
Epoch 78/300
162/162 [==============================] - 0s 142us/step - loss: 0.3100 - accuracy: 0.8765 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 79/300
162/162 [==============================] - 0s 136us/step - loss: 0.3099 - accuracy: 0.8765 - val_loss: 0.5697 - val_accuracy: 0.7407
Epoch 80/300
162/162 [==============================] - 0s 117us/step - loss: 0.3098 - accuracy: 0.8765 - val_loss: 0.5701 - val_accuracy: 0.7407
Epoch 81/300
162/162 [==============================] - 0s 123us/step - loss: 0.3098 - accuracy: 0.8765 - val_loss: 0.5703 - val_accuracy: 0.7407
Epoch 82/300
162/162 [==============================] - 0s 123us/step - loss: 0.3097 - accuracy: 0.8765 - val_loss: 0.5705 - val_accuracy: 0.7407
Epoch 83/300
162/162 [==============================] - 0s 123us/step - loss: 0.3096 - accuracy: 0.8827 - val_loss: 0.5706 - val_accuracy: 0.7407
Epoch 84/300
162/162 [==============================] - 0s 123us/step - loss: 0.3096 - accuracy: 0.8827 - val_loss: 0.5705 - val_accuracy: 0.7407
Epoch 85/300
162/162 [==============================] - 0s 130us/step - loss: 0.3095 - accuracy: 0.8827 - val_loss: 0.5699 - val_accuracy: 0.7407
Epoch 86/300
162/162 [==============================] - 0s 117us/step - loss: 0.3094 - accuracy: 0.8827 - val_loss: 0.5697 - val_accuracy: 0.7407

Epoch 00086: ReduceLROnPlateau reducing learning rate to 3.9062499126885086e-05.
Epoch 87/300
162/162 [==============================] - 0s 117us/step - loss: 0.3093 - accuracy: 0.8827 - val_loss: 0.5696 - val_accuracy: 0.7407
Epoch 88/300
162/162 [==============================] - 0s 148us/step - loss: 0.3093 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 89/300
162/162 [==============================] - 0s 123us/step - loss: 0.3092 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 90/300
162/162 [==============================] - 0s 142us/step - loss: 0.3092 - accuracy: 0.8827 - val_loss: 0.5696 - val_accuracy: 0.7407
Epoch 91/300
162/162 [==============================] - 0s 123us/step - loss: 0.3091 - accuracy: 0.8827 - val_loss: 0.5697 - val_accuracy: 0.7407
Epoch 92/300
162/162 [==============================] - 0s 142us/step - loss: 0.3091 - accuracy: 0.8827 - val_loss: 0.5699 - val_accuracy: 0.7407
Epoch 93/300
162/162 [==============================] - 0s 123us/step - loss: 0.3090 - accuracy: 0.8827 - val_loss: 0.5699 - val_accuracy: 0.7407
Epoch 94/300
162/162 [==============================] - 0s 142us/step - loss: 0.3090 - accuracy: 0.8827 - val_loss: 0.5698 - val_accuracy: 0.7407
Epoch 95/300
162/162 [==============================] - 0s 123us/step - loss: 0.3089 - accuracy: 0.8827 - val_loss: 0.5697 - val_accuracy: 0.7407
Epoch 96/300
162/162 [==============================] - 0s 111us/step - loss: 0.3089 - accuracy: 0.8827 - val_loss: 0.5696 - val_accuracy: 0.7407

Epoch 00096: ReduceLROnPlateau reducing learning rate to 1.9531249563442543e-05.
Epoch 97/300
162/162 [==============================] - 0s 136us/step - loss: 0.3088 - accuracy: 0.8827 - val_loss: 0.5696 - val_accuracy: 0.7407
Epoch 98/300
162/162 [==============================] - 0s 123us/step - loss: 0.3088 - accuracy: 0.8827 - val_loss: 0.5696 - val_accuracy: 0.7407
Epoch 99/300
162/162 [==============================] - 0s 123us/step - loss: 0.3088 - accuracy: 0.8827 - val_loss: 0.5696 - val_accuracy: 0.7407
Epoch 100/300
162/162 [==============================] - 0s 130us/step - loss: 0.3087 - accuracy: 0.8827 - val_loss: 0.5697 - val_accuracy: 0.7407
Epoch 101/300
162/162 [==============================] - 0s 123us/step - loss: 0.3087 - accuracy: 0.8827 - val_loss: 0.5698 - val_accuracy: 0.7407
Epoch 102/300
162/162 [==============================] - 0s 117us/step - loss: 0.3087 - accuracy: 0.8827 - val_loss: 0.5698 - val_accuracy: 0.7407
Epoch 103/300
162/162 [==============================] - 0s 117us/step - loss: 0.3087 - accuracy: 0.8827 - val_loss: 0.5697 - val_accuracy: 0.7407
Epoch 104/300
162/162 [==============================] - 0s 136us/step - loss: 0.3087 - accuracy: 0.8827 - val_loss: 0.5697 - val_accuracy: 0.7407
Epoch 105/300
162/162 [==============================] - 0s 130us/step - loss: 0.3086 - accuracy: 0.8827 - val_loss: 0.5697 - val_accuracy: 0.7407
Epoch 106/300
162/162 [==============================] - 0s 142us/step - loss: 0.3086 - accuracy: 0.8827 - val_loss: 0.5697 - val_accuracy: 0.7407

Epoch 00106: ReduceLROnPlateau reducing learning rate to 9.765624781721272e-06.
Epoch 107/300
162/162 [==============================] - 0s 130us/step - loss: 0.3086 - accuracy: 0.8827 - val_loss: 0.5697 - val_accuracy: 0.7407
Epoch 108/300
162/162 [==============================] - 0s 117us/step - loss: 0.3086 - accuracy: 0.8827 - val_loss: 0.5696 - val_accuracy: 0.7407
Epoch 109/300
162/162 [==============================] - 0s 111us/step - loss: 0.3086 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 110/300
162/162 [==============================] - 0s 111us/step - loss: 0.3085 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 111/300
162/162 [==============================] - 0s 130us/step - loss: 0.3085 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 112/300
162/162 [==============================] - 0s 130us/step - loss: 0.3085 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 113/300
162/162 [==============================] - 0s 123us/step - loss: 0.3085 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 114/300
162/162 [==============================] - 0s 117us/step - loss: 0.3085 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 115/300
162/162 [==============================] - 0s 117us/step - loss: 0.3085 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 116/300
162/162 [==============================] - 0s 148us/step - loss: 0.3085 - accuracy: 0.8827 - val_loss: 0.5694 - val_accuracy: 0.7407

Epoch 00116: ReduceLROnPlateau reducing learning rate to 4.882812390860636e-06.
Epoch 117/300
162/162 [==============================] - 0s 136us/step - loss: 0.3085 - accuracy: 0.8827 - val_loss: 0.5694 - val_accuracy: 0.7407
Epoch 118/300
162/162 [==============================] - 0s 136us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5694 - val_accuracy: 0.7407
Epoch 119/300
162/162 [==============================] - 0s 117us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5694 - val_accuracy: 0.7407
Epoch 120/300
162/162 [==============================] - 0s 123us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5694 - val_accuracy: 0.7407
Epoch 121/300
162/162 [==============================] - 0s 111us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5694 - val_accuracy: 0.7407
Epoch 122/300
162/162 [==============================] - 0s 160us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 123/300
162/162 [==============================] - 0s 130us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 124/300
162/162 [==============================] - 0s 130us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 125/300
162/162 [==============================] - 0s 123us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 126/300
162/162 [==============================] - 0s 123us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407

Epoch 00126: ReduceLROnPlateau reducing learning rate to 2.441406195430318e-06.
Epoch 127/300
162/162 [==============================] - 0s 123us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 128/300
162/162 [==============================] - 0s 117us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 129/300
162/162 [==============================] - 0s 136us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 130/300
162/162 [==============================] - 0s 111us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 131/300
162/162 [==============================] - 0s 123us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5696 - val_accuracy: 0.7407
Epoch 132/300
162/162 [==============================] - 0s 117us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5696 - val_accuracy: 0.7407
Epoch 133/300
162/162 [==============================] - 0s 136us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5696 - val_accuracy: 0.7407
Epoch 134/300
162/162 [==============================] - 0s 105us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5696 - val_accuracy: 0.7407
Epoch 135/300
162/162 [==============================] - 0s 130us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5696 - val_accuracy: 0.7407
Epoch 136/300
162/162 [==============================] - 0s 123us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5696 - val_accuracy: 0.7407

Epoch 00136: ReduceLROnPlateau reducing learning rate to 1.220703097715159e-06.
Epoch 137/300
162/162 [==============================] - 0s 130us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5696 - val_accuracy: 0.7407
Epoch 138/300
162/162 [==============================] - 0s 136us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5696 - val_accuracy: 0.7407
Epoch 139/300
162/162 [==============================] - 0s 130us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5696 - val_accuracy: 0.7407
Epoch 140/300
162/162 [==============================] - 0s 111us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5696 - val_accuracy: 0.7407
Epoch 141/300
162/162 [==============================] - 0s 111us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5696 - val_accuracy: 0.7407
Epoch 142/300
162/162 [==============================] - 0s 136us/step - loss: 0.3084 - accuracy: 0.8827 - val_loss: 0.5696 - val_accuracy: 0.7407
Epoch 143/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 144/300
162/162 [==============================] - 0s 130us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 145/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 146/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407

Epoch 00146: ReduceLROnPlateau reducing learning rate to 6.103515488575795e-07.
Epoch 147/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 148/300
162/162 [==============================] - 0s 148us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 149/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 150/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 151/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 152/300
162/162 [==============================] - 0s 136us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 153/300
162/162 [==============================] - 0s 130us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 154/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 155/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 156/300
162/162 [==============================] - 0s 130us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407

Epoch 00156: ReduceLROnPlateau reducing learning rate to 3.0517577442878974e-07.
Epoch 157/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 158/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 159/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 160/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 161/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 162/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 163/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 164/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 165/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 166/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407

Epoch 00166: ReduceLROnPlateau reducing learning rate to 1.5258788721439487e-07.
Epoch 167/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 168/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 169/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 170/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 171/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 172/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 173/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 174/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 175/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 176/300
162/162 [==============================] - 0s 105us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407

Epoch 00176: ReduceLROnPlateau reducing learning rate to 7.629394360719743e-08.
Epoch 177/300
162/162 [==============================] - 0s 130us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 178/300
162/162 [==============================] - 0s 130us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 179/300
162/162 [==============================] - 0s 130us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 180/300
162/162 [==============================] - 0s 142us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 181/300
162/162 [==============================] - 0s 142us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 182/300
162/162 [==============================] - 0s 142us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 183/300
162/162 [==============================] - 0s 136us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 184/300
162/162 [==============================] - 0s 130us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 185/300
162/162 [==============================] - 0s 130us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 186/300
162/162 [==============================] - 0s 136us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407

Epoch 00186: ReduceLROnPlateau reducing learning rate to 3.814697180359872e-08.
Epoch 187/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 188/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 189/300
162/162 [==============================] - 0s 148us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 190/300
162/162 [==============================] - 0s 142us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 191/300
162/162 [==============================] - 0s 130us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 192/300
162/162 [==============================] - 0s 160us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 193/300
162/162 [==============================] - 0s 160us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 194/300
162/162 [==============================] - 0s 148us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 195/300
162/162 [==============================] - 0s 148us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 196/300
162/162 [==============================] - 0s 148us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407

Epoch 00196: ReduceLROnPlateau reducing learning rate to 1.907348590179936e-08.
Epoch 197/300
162/162 [==============================] - 0s 136us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 198/300
162/162 [==============================] - 0s 179us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 199/300
162/162 [==============================] - ETA: 0s - loss: 0.3988 - accuracy: 0.75 - 0s 142us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 200/300
162/162 [==============================] - 0s 179us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 201/300
162/162 [==============================] - 0s 154us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 202/300
162/162 [==============================] - 0s 142us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 203/300
162/162 [==============================] - 0s 136us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 204/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 205/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 206/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407

Epoch 00206: ReduceLROnPlateau reducing learning rate to 9.53674295089968e-09.
Epoch 207/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 208/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 209/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 210/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 211/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 212/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 213/300
162/162 [==============================] - 0s 130us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 214/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 215/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 216/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407

Epoch 00216: ReduceLROnPlateau reducing learning rate to 4.76837147544984e-09.
Epoch 217/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 218/300
162/162 [==============================] - 0s 105us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 219/300
162/162 [==============================] - 0s 99us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 220/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 221/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 222/300
162/162 [==============================] - 0s 105us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 223/300
162/162 [==============================] - 0s 136us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 224/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 225/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 226/300
162/162 [==============================] - 0s 105us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407

Epoch 00226: ReduceLROnPlateau reducing learning rate to 2.38418573772492e-09.
Epoch 227/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 228/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 229/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 230/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 231/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 232/300
162/162 [==============================] - 0s 105us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 233/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 234/300
162/162 [==============================] - 0s 154us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 235/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 236/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407

Epoch 00236: ReduceLROnPlateau reducing learning rate to 1.19209286886246e-09.
Epoch 237/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 238/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 239/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 240/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 241/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 242/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 243/300
162/162 [==============================] - 0s 130us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 244/300
162/162 [==============================] - 0s 130us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 245/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 246/300
162/162 [==============================] - 0s 136us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407

Epoch 00246: ReduceLROnPlateau reducing learning rate to 5.9604643443123e-10.
Epoch 247/300
162/162 [==============================] - 0s 130us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 248/300
162/162 [==============================] - 0s 130us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 249/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 250/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 251/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 252/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 253/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 254/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 255/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 256/300
162/162 [==============================] - 0s 130us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407

Epoch 00256: ReduceLROnPlateau reducing learning rate to 2.98023217215615e-10.
Epoch 257/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 258/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 259/300
162/162 [==============================] - 0s 105us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 260/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 261/300
162/162 [==============================] - ETA: 0s - loss: 0.3335 - accuracy: 0.78 - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 262/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 263/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 264/300
162/162 [==============================] - 0s 130us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 265/300
162/162 [==============================] - 0s 130us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 266/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407

Epoch 00266: ReduceLROnPlateau reducing learning rate to 1.490116086078075e-10.
Epoch 267/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 268/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 269/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 270/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 271/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 272/300
162/162 [==============================] - 0s 105us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 273/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 274/300
162/162 [==============================] - 0s 105us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 275/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 276/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407

Epoch 00276: ReduceLROnPlateau reducing learning rate to 7.450580430390374e-11.
Epoch 277/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 278/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 279/300
162/162 [==============================] - 0s 136us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 280/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 281/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 282/300
162/162 [==============================] - 0s 130us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 283/300
162/162 [==============================] - 0s 111us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 284/300
162/162 [==============================] - 0s 105us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 285/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 286/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407

Epoch 00286: ReduceLROnPlateau reducing learning rate to 3.725290215195187e-11.
Epoch 287/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 288/300
162/162 [==============================] - 0s 130us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 289/300
162/162 [==============================] - 0s 130us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 290/300
162/162 [==============================] - 0s 173us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 291/300
162/162 [==============================] - 0s 148us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 292/300
162/162 [==============================] - 0s 136us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 293/300
162/162 [==============================] - 0s 136us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 294/300
162/162 [==============================] - 0s 130us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 295/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 296/300
162/162 [==============================] - 0s 130us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407

Epoch 00296: ReduceLROnPlateau reducing learning rate to 1.8626451075975936e-11.
Epoch 297/300
162/162 [==============================] - 0s 117us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 298/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 299/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
Epoch 300/300
162/162 [==============================] - 0s 123us/step - loss: 0.3083 - accuracy: 0.8827 - val_loss: 0.5695 - val_accuracy: 0.7407
In [247]:
# Plot the training history: accuracy and loss curves for train vs. validation.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))  # one point per completed epoch

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
range(0, 300)
In [248]:
# Final evaluation of the trained network on the held-out test set.
test_loss, test_acc = model.evaluate(X_test, y_test)
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
54/54 [==============================] - 0s 93us/step
test loss: 0.5695377522044711, test accuracy: 0.7407407164573669
In [249]:
# Predicted probabilities on the test set; ROC AUC is threshold-independent,
# so it is computed on the raw scores before binarization.
y_pred = model.predict(X_test)
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
AUC ROC:  0.7103174603174603
In [250]:
# Binarize the predicted probabilities at the 0.5 threshold, then report
# Cohen's kappa and the confusion matrix for the hard predictions.
y_pred = [int(prob >= 0.5) for prob in y_pred]
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
Kappa:  0.32978723404255317
[[33  9]
 [ 5  7]]

KMeans

In [376]:
# Inspect the feature frame: six standardized tonal-centroid columns, 216 rows.
X
Out[376]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6
0 1.228153 -0.111909 -0.294563 -1.482099 -0.996056 0.110925
1 -0.401824 -1.384577 0.881209 0.754144 -0.158974 -1.296081
2 -0.336916 -0.720621 0.201084 0.348364 -0.165961 0.182389
3 -0.787942 0.336222 0.156622 -0.619986 1.407460 -1.245124
4 -0.881888 0.404050 0.352235 -0.615268 1.377506 -0.415796
5 -1.125948 0.284207 0.528151 1.033168 -0.517365 0.113763
6 1.864495 -0.090865 0.716920 -0.761402 0.530249 -0.035863
7 1.240845 0.249843 0.223925 -0.408088 0.166387 0.416687
8 0.875601 0.686409 0.502905 1.097471 1.257941 0.717023
9 0.394121 0.865626 -1.374420 0.173845 -0.906500 -0.686859
10 0.751603 1.089250 0.142673 0.667762 0.976845 1.048915
11 0.799422 0.971011 -0.358468 1.110658 0.116881 -0.712692
12 -1.418160 -0.117060 -0.366533 0.038435 -0.488564 -1.235111
13 -0.071703 -1.115223 1.951631 0.497004 0.033906 -0.995556
14 -1.278259 -0.004256 -0.388294 0.419647 -1.144792 -0.789842
15 -1.348400 -0.882436 0.081470 -0.422847 -1.925698 0.566488
16 -0.987764 -0.403920 1.221277 -0.674047 1.117167 0.695178
17 0.282941 1.065034 -0.777297 -0.167672 -0.691064 0.251171
18 0.002045 1.240340 -0.913457 -0.608969 -0.499640 -0.715666
19 0.177116 1.115065 -2.614488 -0.670192 -0.779129 0.025307
20 -0.182803 -1.516954 -0.973426 0.548761 1.138162 -0.638997
21 -0.170806 -1.685146 -0.648144 -0.354393 0.563785 -0.765276
22 -0.119763 -1.154938 1.627973 0.128398 -0.591403 0.461470
23 -1.077572 1.088539 0.971213 0.103589 0.025601 1.072686
24 -1.019809 1.064143 0.982876 -0.044201 -0.081801 0.760251
25 -1.096888 1.018525 -0.188419 -0.477648 -0.779052 -0.966832
26 -0.429215 1.178614 1.291963 0.764843 1.598962 0.847178
27 -1.200365 -0.470005 0.534275 0.383039 -1.396076 0.776101
28 0.871997 0.424606 1.290033 -0.011747 1.054939 -0.612275
29 1.244370 0.591384 1.478840 -0.554976 0.780633 -0.506044
... ... ... ... ... ... ...
186 -0.605024 0.939574 -0.414421 -0.916940 0.885615 -1.006592
187 1.718038 -0.200165 -1.447107 -0.867399 -0.686041 -0.208583
188 -0.120106 0.965845 -0.131325 1.649784 0.831058 -1.040351
189 1.029001 0.908828 -0.420421 1.436201 1.417017 1.986802
190 1.615723 1.026251 -0.662886 -0.619579 0.492418 1.063432
191 1.115026 0.462103 -1.306277 -0.442027 -1.399692 1.539195
192 0.811902 1.116173 0.339093 0.787392 0.009626 1.354667
193 1.133130 1.175484 -0.668705 0.390358 -1.546103 0.805030
194 -0.345830 -1.335691 1.110949 0.375368 -2.301726 -1.494080
195 1.165388 0.181448 -1.067468 1.496216 2.009639 0.973058
196 1.372897 -0.178593 -0.344421 -2.254259 -1.937220 0.099446
197 0.993975 -1.109603 -0.268101 -0.854584 -2.134417 1.246874
198 0.910787 -0.689305 0.054775 0.374695 -0.609228 0.066021
199 0.754410 1.007018 0.000807 2.080566 0.611139 0.689148
200 1.100934 0.748340 -0.138155 2.149923 1.024467 1.784464
201 0.898432 0.993838 0.300420 2.543541 0.818890 0.236400
202 2.023069 0.128358 2.552718 0.420028 0.484273 -0.510637
203 0.513160 -0.063505 1.308959 -0.740080 0.374958 -1.591480
204 0.509622 0.239118 1.804994 -0.524125 1.121726 -2.563172
205 1.142153 1.127451 0.307006 1.061001 0.536137 1.560857
206 0.900240 1.251079 0.039424 1.120889 1.056287 -0.091386
207 0.996446 1.228929 0.310422 1.145966 1.092465 -0.564371
208 -1.699954 0.922256 -1.758628 -0.010227 0.880666 -0.576796
209 -1.794948 1.077467 -1.814708 0.353202 1.329399 -0.854254
210 -1.688284 0.750915 -1.284463 0.305007 1.571732 -0.011164
211 0.709608 0.695516 -1.631065 1.034398 -0.362246 3.060986
212 0.524603 1.211853 0.889937 1.025018 0.400203 1.127006
213 0.459928 -1.312371 -0.023584 -0.263364 -1.095549 0.746115
214 -0.649891 0.216789 0.486250 -0.661054 0.414271 -0.364451
215 0.655230 -1.285507 0.582914 0.864601 -0.599593 -0.795822

216 rows × 6 columns

In [377]:
# Elbow method: collect the within-cluster sum of squares (inertia)
# for k = 1..14, fitting a fresh K-Means at each k.
WSSs = []
for n_clusters in range(1, 15):
    model_km = KMeans(n_clusters=n_clusters, random_state=0)
    model_km.fit(X)
    WSSs.append(model_km.inertia_)
WSSs
Out[377]:
[1296.0,
 1078.1827047277895,
 923.3020026239071,
 819.5563896253416,
 739.7801417823988,
 674.6658680669257,
 625.7817512921029,
 571.4893390980873,
 532.4311978233743,
 507.9160125320259,
 477.7010786792888,
 447.87692991955134,
 435.1097377466632,
 418.2600781645563]
In [378]:
# Plot inertia against k; the "knee" of this curve guides the choice of k.
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)
Out[378]:
[<matplotlib.lines.Line2D at 0x1b82d1ea898>]

K = 3, chosen from the elbow in the inertia plot above

In [379]:
# Fit the final K-Means with k = 3 (picked from the elbow plot above),
# fixed random_state for reproducibility.
kmeans_tc = KMeans(n_clusters=3, random_state=0, n_init=10)
kmeans_tc.fit(X)
Out[379]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=3, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [380]:
# Cluster label assigned to each row during fitting.
kmeans_tc.labels_
Out[380]:
array([0, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 1, 1, 1, 0, 1, 2, 1, 0, 1, 0,
       1, 1, 1, 1, 2, 1, 1, 1, 2, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1,
       1, 1, 0, 0, 0, 1, 0, 1, 2, 2, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2,
       0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 2, 2, 0, 0, 1, 0, 0, 0,
       2, 2, 1, 2, 2, 0, 0, 2, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 2, 1,
       1, 1, 2, 0, 2, 1, 2, 2, 0, 0, 0, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 2, 1, 1, 0, 2, 1, 0, 2, 1, 2, 2, 1, 1, 2, 0, 0, 2,
       0, 2, 2, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 0, 1, 2, 2, 0, 0, 1, 2,
       1, 1, 1, 2, 1, 0, 2, 2, 2, 1, 1, 0, 2, 2, 2, 0, 2, 2, 1, 2, 0, 0,
       0, 2, 2, 2, 2, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 0, 1, 1])
In [381]:
# Predict cluster assignments on the same data the model was fitted on,
# so this output matches kmeans_tc.labels_ above.
clusters_tc = kmeans_tc.predict(X)
clusters_tc
Out[381]:
array([0, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 1, 1, 1, 0, 1, 2, 1, 0, 1, 0,
       1, 1, 1, 1, 2, 1, 1, 1, 2, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1,
       1, 1, 0, 0, 0, 1, 0, 1, 2, 2, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2,
       0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 2, 2, 0, 0, 1, 0, 0, 0,
       2, 2, 1, 2, 2, 0, 0, 2, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 2, 1,
       1, 1, 2, 0, 2, 1, 2, 2, 0, 0, 0, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 2, 1, 1, 0, 2, 1, 0, 2, 1, 2, 2, 1, 1, 2, 0, 0, 2,
       0, 2, 2, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 0, 1, 2, 2, 0, 0, 1, 2,
       1, 1, 1, 2, 1, 0, 2, 2, 2, 1, 1, 0, 2, 2, 2, 0, 2, 2, 1, 2, 0, 0,
       0, 2, 2, 2, 2, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 0, 1, 1])
In [382]:
# Attach the cluster id and the target label to the feature frame for the
# cross-tabulation below.
# NOTE(review): this mutates X in place — re-running the K-Means cells after
# this point would cluster on the two extra columns too; confirm cell order.
X.loc[:,'Cluster'] = clusters_tc
X.loc[:,'chosen'] = list(y)
In [383]:
X
Out[383]:
tonalcentroidfiles_1 tonalcentroidfiles_2 tonalcentroidfiles_3 tonalcentroidfiles_4 tonalcentroidfiles_5 tonalcentroidfiles_6 Cluster chosen
0 1.228153 -0.111909 -0.294563 -1.482099 -0.996056 0.110925 0 0
1 -0.401824 -1.384577 0.881209 0.754144 -0.158974 -1.296081 1 0
2 -0.336916 -0.720621 0.201084 0.348364 -0.165961 0.182389 1 0
3 -0.787942 0.336222 0.156622 -0.619986 1.407460 -1.245124 1 0
4 -0.881888 0.404050 0.352235 -0.615268 1.377506 -0.415796 1 0
5 -1.125948 0.284207 0.528151 1.033168 -0.517365 0.113763 1 0
6 1.864495 -0.090865 0.716920 -0.761402 0.530249 -0.035863 0 0
7 1.240845 0.249843 0.223925 -0.408088 0.166387 0.416687 2 0
8 0.875601 0.686409 0.502905 1.097471 1.257941 0.717023 2 0
9 0.394121 0.865626 -1.374420 0.173845 -0.906500 -0.686859 2 0
10 0.751603 1.089250 0.142673 0.667762 0.976845 1.048915 2 0
11 0.799422 0.971011 -0.358468 1.110658 0.116881 -0.712692 2 0
12 -1.418160 -0.117060 -0.366533 0.038435 -0.488564 -1.235111 1 0
13 -0.071703 -1.115223 1.951631 0.497004 0.033906 -0.995556 1 0
14 -1.278259 -0.004256 -0.388294 0.419647 -1.144792 -0.789842 1 0
15 -1.348400 -0.882436 0.081470 -0.422847 -1.925698 0.566488 0 0
16 -0.987764 -0.403920 1.221277 -0.674047 1.117167 0.695178 1 0
17 0.282941 1.065034 -0.777297 -0.167672 -0.691064 0.251171 2 0
18 0.002045 1.240340 -0.913457 -0.608969 -0.499640 -0.715666 1 0
19 0.177116 1.115065 -2.614488 -0.670192 -0.779129 0.025307 0 0
20 -0.182803 -1.516954 -0.973426 0.548761 1.138162 -0.638997 1 0
21 -0.170806 -1.685146 -0.648144 -0.354393 0.563785 -0.765276 0 0
22 -0.119763 -1.154938 1.627973 0.128398 -0.591403 0.461470 1 0
23 -1.077572 1.088539 0.971213 0.103589 0.025601 1.072686 1 0
24 -1.019809 1.064143 0.982876 -0.044201 -0.081801 0.760251 1 0
25 -1.096888 1.018525 -0.188419 -0.477648 -0.779052 -0.966832 1 0
26 -0.429215 1.178614 1.291963 0.764843 1.598962 0.847178 2 0
27 -1.200365 -0.470005 0.534275 0.383039 -1.396076 0.776101 1 0
28 0.871997 0.424606 1.290033 -0.011747 1.054939 -0.612275 1 0
29 1.244370 0.591384 1.478840 -0.554976 0.780633 -0.506044 1 0
... ... ... ... ... ... ... ... ...
186 -0.605024 0.939574 -0.414421 -0.916940 0.885615 -1.006592 1 1
187 1.718038 -0.200165 -1.447107 -0.867399 -0.686041 -0.208583 0 1
188 -0.120106 0.965845 -0.131325 1.649784 0.831058 -1.040351 2 1
189 1.029001 0.908828 -0.420421 1.436201 1.417017 1.986802 2 1
190 1.615723 1.026251 -0.662886 -0.619579 0.492418 1.063432 2 1
191 1.115026 0.462103 -1.306277 -0.442027 -1.399692 1.539195 0 1
192 0.811902 1.116173 0.339093 0.787392 0.009626 1.354667 2 1
193 1.133130 1.175484 -0.668705 0.390358 -1.546103 0.805030 2 1
194 -0.345830 -1.335691 1.110949 0.375368 -2.301726 -1.494080 1 1
195 1.165388 0.181448 -1.067468 1.496216 2.009639 0.973058 2 1
196 1.372897 -0.178593 -0.344421 -2.254259 -1.937220 0.099446 0 1
197 0.993975 -1.109603 -0.268101 -0.854584 -2.134417 1.246874 0 1
198 0.910787 -0.689305 0.054775 0.374695 -0.609228 0.066021 0 1
199 0.754410 1.007018 0.000807 2.080566 0.611139 0.689148 2 1
200 1.100934 0.748340 -0.138155 2.149923 1.024467 1.784464 2 1
201 0.898432 0.993838 0.300420 2.543541 0.818890 0.236400 2 1
202 2.023069 0.128358 2.552718 0.420028 0.484273 -0.510637 2 1
203 0.513160 -0.063505 1.308959 -0.740080 0.374958 -1.591480 1 1
204 0.509622 0.239118 1.804994 -0.524125 1.121726 -2.563172 1 1
205 1.142153 1.127451 0.307006 1.061001 0.536137 1.560857 2 1
206 0.900240 1.251079 0.039424 1.120889 1.056287 -0.091386 2 1
207 0.996446 1.228929 0.310422 1.145966 1.092465 -0.564371 2 1
208 -1.699954 0.922256 -1.758628 -0.010227 0.880666 -0.576796 1 1
209 -1.794948 1.077467 -1.814708 0.353202 1.329399 -0.854254 1 1
210 -1.688284 0.750915 -1.284463 0.305007 1.571732 -0.011164 1 1
211 0.709608 0.695516 -1.631065 1.034398 -0.362246 3.060986 2 1
212 0.524603 1.211853 0.889937 1.025018 0.400203 1.127006 2 1
213 0.459928 -1.312371 -0.023584 -0.263364 -1.095549 0.746115 0 1
214 -0.649891 0.216789 0.486250 -0.661054 0.414271 -0.364451 1 1
215 0.655230 -1.285507 0.582914 0.864601 -0.599593 -0.795822 1 1

216 rows × 8 columns

In [384]:
# Count tracks per (chosen, cluster) pair, pivot clusters onto the index,
# and draw a stacked bar chart: one bar per cluster, split by chosen/not.
stacked = (
    X.groupby(['chosen', 'Cluster'])
     .size()
     .reset_index()
)
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:, [0, 1]].plot.bar(stacked=True, figsize=(10, 7))
Out[384]:
<matplotlib.axes._subplots.AxesSubplot at 0x1b82d22f630>

Chromagram

In [15]:
# All feature columns available in the standardized frame.
df_n_ps_std[0].columns
Out[15]:
Index(['durationfiles', 'rmsfiles', 'rmsmedianfiles', 'lowenergyfiles',
       'ASRfiles', 'beatspectrumfiles', 'eventdensityfiles', 'tempofiles',
       'pulseclarityfiles', 'zerocrossfiles', 'rolloffsfiles',
       'brightnessfiles', 'spreadfiles', 'centroidfiles', 'kurtosisfiles',
       'flatnessfiles', 'entropyfiles', 'mfccfiles_1', 'mfccfiles_2',
       'mfccfiles_3', 'mfccfiles_4', 'mfccfiles_5', 'mfccfiles_6',
       'mfccfiles_7', 'mfccfiles_8', 'mfccfiles_9', 'mfccfiles_10',
       'mfccfiles_11', 'mfccfiles_12', 'mfccfiles_13', 'inharmonicityfiles',
       'bestkeyfiles', 'keyclarityfiles', 'modalityfiles',
       'tonalcentroidfiles_1', 'tonalcentroidfiles_2', 'tonalcentroidfiles_3',
       'tonalcentroidfiles_4', 'tonalcentroidfiles_5', 'tonalcentroidfiles_6',
       'chromagramfiles_1', 'chromagramfiles_2', 'chromagramfiles_3',
       'chromagramfiles_4', 'chromagramfiles_5', 'chromagramfiles_6',
       'chromagramfiles_7', 'chromagramfiles_8', 'chromagramfiles_9',
       'chromagramfiles_10', 'chromagramfiles_11', 'chromagramfiles_12',
       'attackslopefiles', 'attackleapfiles', 'chosen'],
      dtype='object')
In [16]:
# The chromagram block occupies positional columns 40..51 (12 pitch-class bins).
df_n_ps_std[0].columns[40:52]
Out[16]:
Index(['chromagramfiles_1', 'chromagramfiles_2', 'chromagramfiles_3',
       'chromagramfiles_4', 'chromagramfiles_5', 'chromagramfiles_6',
       'chromagramfiles_7', 'chromagramfiles_8', 'chromagramfiles_9',
       'chromagramfiles_10', 'chromagramfiles_11', 'chromagramfiles_12'],
      dtype='object')
In [17]:
# Per company, slice out the 12 standardized chromagram columns (40:52).
# .iloc already returns a DataFrame carrying the right column labels, so the
# original pd.DataFrame(...) wrap and columns re-assignment were redundant;
# .copy() makes each slice independent of its source frame.
df_n_ps_std_ch = [df_n_ps_std[i].iloc[:, 40:52].copy() for i in range(len(companies))]
df_n_ps_std_ch[0].info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 315 entries, 0 to 314
Data columns (total 12 columns):
chromagramfiles_1     315 non-null float64
chromagramfiles_2     315 non-null float64
chromagramfiles_3     315 non-null float64
chromagramfiles_4     315 non-null float64
chromagramfiles_5     315 non-null float64
chromagramfiles_6     315 non-null float64
chromagramfiles_7     315 non-null float64
chromagramfiles_8     315 non-null float64
chromagramfiles_9     315 non-null float64
chromagramfiles_10    315 non-null float64
chromagramfiles_11    315 non-null float64
chromagramfiles_12    315 non-null float64
dtypes: float64(12)
memory usage: 29.6 KB

Arte Francés

ANN

In [257]:
# Features: the standardized chromagram columns for the first company.
X = df_n_ps_std_ch[0]
In [258]:
# Target: the 'chosen' column for the same company.
y = df_n_ps[0]['chosen']
In [259]:
# Hold out a test set; fix random_state so the split — and every metric
# computed from it downstream — is reproducible across kernel restarts.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
In [260]:
# Sanity check: training rows × 12 chromagram features.
X_train.shape
Out[260]:
(236, 12)
In [261]:
# Base estimator for the grid search; these layer sizes are just the
# starting point — the search below tunes them.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [262]:
# Candidate hyperparameter values for the grid search. batch_size_vec is
# defined but excluded from the grid below to keep the search tractable.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [263]:
import time
start = time.time() # Wall-clock start (seconds since the Unix epoch), used to time the grid search

np.random.seed(1234)  # seed numpy's global RNG so the search is repeatable
# Parameter grid to explore; batch_size is commented out to keep the search tractable.
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track both Cohen's kappa and accuracy per fold; refit the winner by accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): `iid` was deprecated in scikit-learn 0.22 and removed in 0.24;
# this call only runs on older versions — confirm the pinned environment.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [395]:
# Run the full grid search (took ~30 minutes on this data) and report the
# best configuration with its cross-validated accuracy and kappa.
grid.fit(X_train, y_train)

print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # Wall-clock time after the search finishes
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'tanh', 'hidden_layer_sizes': (30, 30), 'learning_rate_init': 0.003, 'max_iter': 200}, que permiten obtener un Accuracy de 75.85% y un Kappa del 35.14
Tiempo total: 30.13 minutos
In [264]:
# Hard-code the winning configuration from the (slow) grid search above so
# the rest of the notebook can re-run without refitting the search.
grid.best_params_={'activation': 'tanh', 'hidden_layer_sizes': (30, 30), 'learning_rate_init': 0.003, 'max_iter': 200}

n0 = X_train.shape[1]  # input width = number of features

# Layer widths: the tuned hidden sizes followed by a single output unit.
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)

lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [265]:
# Keras functional API: input layer sized to the feature count.
input_tensor = Input(shape = (n0,))
In [266]:
# Chain the tuned hidden layers on top of the input tensor, each using the
# activation selected by the grid search; the final layer is a single
# sigmoid unit for binary classification.
hidden_outputs = [input_tensor]
for i in range (len(ns)-1):
    hidden_outputs.append(Dense(ns[i], activation = grid.best_params_['activation'])(hidden_outputs[i]))
    
classification_output = Dense(ns[-1], activation = 'sigmoid')(hidden_outputs[-1])
In [267]:
# Assemble the model and snapshot its freshly-initialized weights; they are
# restored before training below so fitting starts from a known state.
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [268]:
# Architecture overview: layer shapes and parameter counts.
model.summary()
Model: "model_15"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_16 (InputLayer)        (None, 12)                0         
_________________________________________________________________
dense_43 (Dense)             (None, 30)                390       
_________________________________________________________________
dense_44 (Dense)             (None, 30)                930       
_________________________________________________________________
dense_45 (Dense)             (None, 1)                 31        
=================================================================
Total params: 1,351
Trainable params: 1,351
Non-trainable params: 0
_________________________________________________________________
In [269]:
# Restore the initial weights, compile with the tuned learning rate, and
# train; ReduceLROnPlateau halves the LR whenever val_accuracy fails to
# improve by 0.01 for 10 epochs.
model.set_weights(weights)
adam = keras.optimizers.Adam(lr=lr)  # NOTE(review): `lr` is the legacy arg name; newer Keras uses `learning_rate`
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), 
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 236 samples, validate on 79 samples
Epoch 1/200
236/236 [==============================] - 0s 1ms/step - loss: 0.6878 - accuracy: 0.5508 - val_loss: 0.6586 - val_accuracy: 0.6456
Epoch 2/200
236/236 [==============================] - 0s 68us/step - loss: 0.6056 - accuracy: 0.6780 - val_loss: 0.6321 - val_accuracy: 0.6582
Epoch 3/200
236/236 [==============================] - 0s 76us/step - loss: 0.5653 - accuracy: 0.7161 - val_loss: 0.6140 - val_accuracy: 0.7089
Epoch 4/200
236/236 [==============================] - 0s 114us/step - loss: 0.5449 - accuracy: 0.7542 - val_loss: 0.6089 - val_accuracy: 0.7342
Epoch 5/200
236/236 [==============================] - 0s 68us/step - loss: 0.5377 - accuracy: 0.7373 - val_loss: 0.6197 - val_accuracy: 0.7342
Epoch 6/200
236/236 [==============================] - 0s 68us/step - loss: 0.5268 - accuracy: 0.7458 - val_loss: 0.6108 - val_accuracy: 0.7342
Epoch 7/200
236/236 [==============================] - 0s 68us/step - loss: 0.5220 - accuracy: 0.7415 - val_loss: 0.6037 - val_accuracy: 0.7342
Epoch 8/200
236/236 [==============================] - 0s 68us/step - loss: 0.5164 - accuracy: 0.7500 - val_loss: 0.6085 - val_accuracy: 0.7342
Epoch 9/200
236/236 [==============================] - 0s 80us/step - loss: 0.5079 - accuracy: 0.7542 - val_loss: 0.6164 - val_accuracy: 0.7342
Epoch 10/200
236/236 [==============================] - 0s 64us/step - loss: 0.5028 - accuracy: 0.7627 - val_loss: 0.6123 - val_accuracy: 0.7215
Epoch 11/200
236/236 [==============================] - 0s 64us/step - loss: 0.4975 - accuracy: 0.7754 - val_loss: 0.6172 - val_accuracy: 0.7342
Epoch 12/200
236/236 [==============================] - 0s 72us/step - loss: 0.4887 - accuracy: 0.7712 - val_loss: 0.6071 - val_accuracy: 0.7342
Epoch 13/200
236/236 [==============================] - 0s 68us/step - loss: 0.4833 - accuracy: 0.7669 - val_loss: 0.6131 - val_accuracy: 0.7595
Epoch 14/200
236/236 [==============================] - 0s 72us/step - loss: 0.4827 - accuracy: 0.7797 - val_loss: 0.6097 - val_accuracy: 0.7595
Epoch 15/200
236/236 [==============================] - 0s 68us/step - loss: 0.4737 - accuracy: 0.7712 - val_loss: 0.6081 - val_accuracy: 0.7468
Epoch 16/200
236/236 [==============================] - 0s 72us/step - loss: 0.4653 - accuracy: 0.7839 - val_loss: 0.6064 - val_accuracy: 0.7468
Epoch 17/200
236/236 [==============================] - 0s 76us/step - loss: 0.4567 - accuracy: 0.7881 - val_loss: 0.6056 - val_accuracy: 0.7468
Epoch 18/200
236/236 [==============================] - 0s 72us/step - loss: 0.4491 - accuracy: 0.8051 - val_loss: 0.6110 - val_accuracy: 0.7468
Epoch 19/200
236/236 [==============================] - 0s 72us/step - loss: 0.4412 - accuracy: 0.8008 - val_loss: 0.6020 - val_accuracy: 0.7595
Epoch 20/200
236/236 [==============================] - 0s 76us/step - loss: 0.4339 - accuracy: 0.8008 - val_loss: 0.6102 - val_accuracy: 0.7722
Epoch 21/200
236/236 [==============================] - 0s 72us/step - loss: 0.4248 - accuracy: 0.8093 - val_loss: 0.6192 - val_accuracy: 0.7595
Epoch 22/200
236/236 [==============================] - 0s 68us/step - loss: 0.4183 - accuracy: 0.8136 - val_loss: 0.6316 - val_accuracy: 0.7595
Epoch 23/200
236/236 [==============================] - 0s 68us/step - loss: 0.4111 - accuracy: 0.8263 - val_loss: 0.6119 - val_accuracy: 0.7722
Epoch 24/200
236/236 [==============================] - 0s 68us/step - loss: 0.3991 - accuracy: 0.8347 - val_loss: 0.6196 - val_accuracy: 0.7848
Epoch 25/200
236/236 [==============================] - 0s 68us/step - loss: 0.3958 - accuracy: 0.8347 - val_loss: 0.6337 - val_accuracy: 0.7722
Epoch 26/200
236/236 [==============================] - 0s 64us/step - loss: 0.3859 - accuracy: 0.8390 - val_loss: 0.6179 - val_accuracy: 0.7595
Epoch 27/200
236/236 [==============================] - 0s 64us/step - loss: 0.3773 - accuracy: 0.8559 - val_loss: 0.6065 - val_accuracy: 0.7595
Epoch 28/200
236/236 [==============================] - 0s 64us/step - loss: 0.3665 - accuracy: 0.8686 - val_loss: 0.6214 - val_accuracy: 0.7595
Epoch 29/200
236/236 [==============================] - 0s 68us/step - loss: 0.3607 - accuracy: 0.8686 - val_loss: 0.6501 - val_accuracy: 0.7848
Epoch 30/200
236/236 [==============================] - 0s 72us/step - loss: 0.3524 - accuracy: 0.8814 - val_loss: 0.6416 - val_accuracy: 0.7848
Epoch 31/200
236/236 [==============================] - 0s 72us/step - loss: 0.3448 - accuracy: 0.8771 - val_loss: 0.6384 - val_accuracy: 0.7722
Epoch 32/200
236/236 [==============================] - 0s 55us/step - loss: 0.3319 - accuracy: 0.9068 - val_loss: 0.6380 - val_accuracy: 0.7848
Epoch 33/200
236/236 [==============================] - 0s 64us/step - loss: 0.3248 - accuracy: 0.9025 - val_loss: 0.6375 - val_accuracy: 0.7722
Epoch 34/200
236/236 [==============================] - 0s 68us/step - loss: 0.3137 - accuracy: 0.9068 - val_loss: 0.6379 - val_accuracy: 0.7722

Epoch 00034: ReduceLROnPlateau reducing learning rate to 0.001500000013038516.
Epoch 35/200
236/236 [==============================] - 0s 85us/step - loss: 0.3053 - accuracy: 0.9195 - val_loss: 0.6404 - val_accuracy: 0.7722
Epoch 36/200
236/236 [==============================] - 0s 102us/step - loss: 0.3021 - accuracy: 0.9110 - val_loss: 0.6444 - val_accuracy: 0.7722
Epoch 37/200
236/236 [==============================] - 0s 76us/step - loss: 0.2963 - accuracy: 0.9237 - val_loss: 0.6496 - val_accuracy: 0.7722
Epoch 38/200
236/236 [==============================] - 0s 80us/step - loss: 0.2929 - accuracy: 0.9322 - val_loss: 0.6603 - val_accuracy: 0.7722
Epoch 39/200
236/236 [==============================] - 0s 76us/step - loss: 0.2876 - accuracy: 0.9322 - val_loss: 0.6594 - val_accuracy: 0.7722
Epoch 40/200
236/236 [==============================] - 0s 76us/step - loss: 0.2838 - accuracy: 0.9237 - val_loss: 0.6579 - val_accuracy: 0.7722
Epoch 41/200
236/236 [==============================] - 0s 72us/step - loss: 0.2807 - accuracy: 0.9407 - val_loss: 0.6571 - val_accuracy: 0.7848
Epoch 42/200
236/236 [==============================] - 0s 72us/step - loss: 0.2750 - accuracy: 0.9492 - val_loss: 0.6442 - val_accuracy: 0.7848
Epoch 43/200
236/236 [==============================] - 0s 68us/step - loss: 0.2730 - accuracy: 0.9322 - val_loss: 0.6509 - val_accuracy: 0.7722
Epoch 44/200
236/236 [==============================] - 0s 80us/step - loss: 0.2661 - accuracy: 0.9322 - val_loss: 0.6503 - val_accuracy: 0.7848

Epoch 00044: ReduceLROnPlateau reducing learning rate to 0.000750000006519258.
Epoch 45/200
236/236 [==============================] - 0s 68us/step - loss: 0.2609 - accuracy: 0.9407 - val_loss: 0.6535 - val_accuracy: 0.7848
Epoch 46/200
236/236 [==============================] - 0s 72us/step - loss: 0.2597 - accuracy: 0.9407 - val_loss: 0.6632 - val_accuracy: 0.7848
Epoch 47/200
236/236 [==============================] - 0s 68us/step - loss: 0.2568 - accuracy: 0.9407 - val_loss: 0.6629 - val_accuracy: 0.7848
Epoch 48/200
236/236 [==============================] - 0s 72us/step - loss: 0.2549 - accuracy: 0.9449 - val_loss: 0.6638 - val_accuracy: 0.7848
Epoch 49/200
236/236 [==============================] - 0s 85us/step - loss: 0.2530 - accuracy: 0.9449 - val_loss: 0.6635 - val_accuracy: 0.7848
Epoch 50/200
236/236 [==============================] - 0s 80us/step - loss: 0.2506 - accuracy: 0.9492 - val_loss: 0.6610 - val_accuracy: 0.7848
Epoch 51/200
236/236 [==============================] - 0s 76us/step - loss: 0.2487 - accuracy: 0.9449 - val_loss: 0.6626 - val_accuracy: 0.7848
Epoch 52/200
236/236 [==============================] - 0s 72us/step - loss: 0.2470 - accuracy: 0.9449 - val_loss: 0.6553 - val_accuracy: 0.7848
Epoch 53/200
236/236 [==============================] - 0s 72us/step - loss: 0.2456 - accuracy: 0.9534 - val_loss: 0.6596 - val_accuracy: 0.7722
Epoch 54/200
236/236 [==============================] - 0s 72us/step - loss: 0.2437 - accuracy: 0.9534 - val_loss: 0.6595 - val_accuracy: 0.7848

Epoch 00054: ReduceLROnPlateau reducing learning rate to 0.000375000003259629.
Epoch 55/200
236/236 [==============================] - 0s 97us/step - loss: 0.2409 - accuracy: 0.9534 - val_loss: 0.6614 - val_accuracy: 0.7848
Epoch 56/200
236/236 [==============================] - 0s 93us/step - loss: 0.2396 - accuracy: 0.9534 - val_loss: 0.6646 - val_accuracy: 0.7848
Epoch 57/200
236/236 [==============================] - 0s 97us/step - loss: 0.2384 - accuracy: 0.9534 - val_loss: 0.6649 - val_accuracy: 0.7848
Epoch 58/200
236/236 [==============================] - 0s 93us/step - loss: 0.2379 - accuracy: 0.9534 - val_loss: 0.6640 - val_accuracy: 0.7848
Epoch 59/200
236/236 [==============================] - 0s 97us/step - loss: 0.2368 - accuracy: 0.9534 - val_loss: 0.6655 - val_accuracy: 0.7848
Epoch 60/200
236/236 [==============================] - 0s 97us/step - loss: 0.2360 - accuracy: 0.9534 - val_loss: 0.6654 - val_accuracy: 0.7848
Epoch 61/200
236/236 [==============================] - 0s 93us/step - loss: 0.2345 - accuracy: 0.9534 - val_loss: 0.6652 - val_accuracy: 0.7848
Epoch 62/200
236/236 [==============================] - 0s 97us/step - loss: 0.2337 - accuracy: 0.9534 - val_loss: 0.6654 - val_accuracy: 0.7848
Epoch 63/200
236/236 [==============================] - 0s 93us/step - loss: 0.2328 - accuracy: 0.9576 - val_loss: 0.6666 - val_accuracy: 0.7848
Epoch 64/200
236/236 [==============================] - 0s 110us/step - loss: 0.2317 - accuracy: 0.9576 - val_loss: 0.6632 - val_accuracy: 0.7848

Epoch 00064: ReduceLROnPlateau reducing learning rate to 0.0001875000016298145.
Epoch 65/200
236/236 [==============================] - 0s 93us/step - loss: 0.2305 - accuracy: 0.9576 - val_loss: 0.6640 - val_accuracy: 0.7848
Epoch 66/200
236/236 [==============================] - 0s 144us/step - loss: 0.2300 - accuracy: 0.9576 - val_loss: 0.6646 - val_accuracy: 0.7848
Epoch 67/200
236/236 [==============================] - 0s 97us/step - loss: 0.2294 - accuracy: 0.9534 - val_loss: 0.6643 - val_accuracy: 0.7848
Epoch 68/200
236/236 [==============================] - 0s 110us/step - loss: 0.2289 - accuracy: 0.9576 - val_loss: 0.6639 - val_accuracy: 0.7848
Epoch 69/200
236/236 [==============================] - 0s 97us/step - loss: 0.2284 - accuracy: 0.9576 - val_loss: 0.6654 - val_accuracy: 0.7848
Epoch 70/200
236/236 [==============================] - 0s 97us/step - loss: 0.2280 - accuracy: 0.9534 - val_loss: 0.6664 - val_accuracy: 0.7722
Epoch 71/200
236/236 [==============================] - 0s 97us/step - loss: 0.2273 - accuracy: 0.9534 - val_loss: 0.6677 - val_accuracy: 0.7722
Epoch 72/200
236/236 [==============================] - 0s 93us/step - loss: 0.2268 - accuracy: 0.9534 - val_loss: 0.6696 - val_accuracy: 0.7722
Epoch 73/200
236/236 [==============================] - 0s 97us/step - loss: 0.2265 - accuracy: 0.9534 - val_loss: 0.6711 - val_accuracy: 0.7722
Epoch 74/200
236/236 [==============================] - 0s 89us/step - loss: 0.2258 - accuracy: 0.9534 - val_loss: 0.6716 - val_accuracy: 0.7848

Epoch 00074: ReduceLROnPlateau reducing learning rate to 9.375000081490725e-05.
Epoch 75/200
236/236 [==============================] - 0s 97us/step - loss: 0.2253 - accuracy: 0.9576 - val_loss: 0.6726 - val_accuracy: 0.7848
Epoch 76/200
236/236 [==============================] - 0s 97us/step - loss: 0.2251 - accuracy: 0.9576 - val_loss: 0.6724 - val_accuracy: 0.7848
Epoch 77/200
236/236 [==============================] - 0s 97us/step - loss: 0.2249 - accuracy: 0.9576 - val_loss: 0.6721 - val_accuracy: 0.7848
Epoch 78/200
236/236 [==============================] - 0s 97us/step - loss: 0.2246 - accuracy: 0.9576 - val_loss: 0.6725 - val_accuracy: 0.7848
Epoch 79/200
236/236 [==============================] - 0s 102us/step - loss: 0.2244 - accuracy: 0.9576 - val_loss: 0.6729 - val_accuracy: 0.7848
Epoch 80/200
236/236 [==============================] - 0s 110us/step - loss: 0.2242 - accuracy: 0.9576 - val_loss: 0.6735 - val_accuracy: 0.7848
Epoch 81/200
236/236 [==============================] - 0s 97us/step - loss: 0.2239 - accuracy: 0.9576 - val_loss: 0.6747 - val_accuracy: 0.7848
Epoch 82/200
236/236 [==============================] - 0s 93us/step - loss: 0.2237 - accuracy: 0.9576 - val_loss: 0.6743 - val_accuracy: 0.7848
Epoch 83/200
236/236 [==============================] - 0s 85us/step - loss: 0.2235 - accuracy: 0.9576 - val_loss: 0.6750 - val_accuracy: 0.7848
Epoch 84/200
236/236 [==============================] - 0s 97us/step - loss: 0.2232 - accuracy: 0.9576 - val_loss: 0.6752 - val_accuracy: 0.7848

Epoch 00084: ReduceLROnPlateau reducing learning rate to 4.6875000407453626e-05.
Epoch 85/200
236/236 [==============================] - 0s 93us/step - loss: 0.2229 - accuracy: 0.9576 - val_loss: 0.6753 - val_accuracy: 0.7848
Epoch 86/200
236/236 [==============================] - 0s 102us/step - loss: 0.2229 - accuracy: 0.9576 - val_loss: 0.6756 - val_accuracy: 0.7848
Epoch 87/200
236/236 [==============================] - 0s 89us/step - loss: 0.2227 - accuracy: 0.9576 - val_loss: 0.6755 - val_accuracy: 0.7848
Epoch 88/200
236/236 [==============================] - 0s 106us/step - loss: 0.2226 - accuracy: 0.9576 - val_loss: 0.6756 - val_accuracy: 0.7848
Epoch 89/200
236/236 [==============================] - 0s 93us/step - loss: 0.2225 - accuracy: 0.9576 - val_loss: 0.6758 - val_accuracy: 0.7848
Epoch 90/200
236/236 [==============================] - 0s 97us/step - loss: 0.2224 - accuracy: 0.9576 - val_loss: 0.6754 - val_accuracy: 0.7848
Epoch 91/200
236/236 [==============================] - 0s 89us/step - loss: 0.2223 - accuracy: 0.9576 - val_loss: 0.6754 - val_accuracy: 0.7848
Epoch 92/200
236/236 [==============================] - 0s 93us/step - loss: 0.2221 - accuracy: 0.9576 - val_loss: 0.6756 - val_accuracy: 0.7848
Epoch 93/200
236/236 [==============================] - 0s 85us/step - loss: 0.2221 - accuracy: 0.9576 - val_loss: 0.6755 - val_accuracy: 0.7848
Epoch 94/200
236/236 [==============================] - 0s 102us/step - loss: 0.2219 - accuracy: 0.9576 - val_loss: 0.6755 - val_accuracy: 0.7848

Epoch 00094: ReduceLROnPlateau reducing learning rate to 2.3437500203726813e-05.
Epoch 95/200
236/236 [==============================] - 0s 114us/step - loss: 0.2217 - accuracy: 0.9576 - val_loss: 0.6755 - val_accuracy: 0.7848
Epoch 96/200
236/236 [==============================] - 0s 106us/step - loss: 0.2217 - accuracy: 0.9576 - val_loss: 0.6755 - val_accuracy: 0.7848
Epoch 97/200
236/236 [==============================] - 0s 93us/step - loss: 0.2216 - accuracy: 0.9576 - val_loss: 0.6757 - val_accuracy: 0.7848
Epoch 98/200
236/236 [==============================] - 0s 106us/step - loss: 0.2215 - accuracy: 0.9576 - val_loss: 0.6757 - val_accuracy: 0.7848
Epoch 99/200
236/236 [==============================] - 0s 102us/step - loss: 0.2215 - accuracy: 0.9576 - val_loss: 0.6757 - val_accuracy: 0.7848
Epoch 100/200
236/236 [==============================] - 0s 89us/step - loss: 0.2214 - accuracy: 0.9576 - val_loss: 0.6759 - val_accuracy: 0.7848
Epoch 101/200
236/236 [==============================] - 0s 106us/step - loss: 0.2214 - accuracy: 0.9576 - val_loss: 0.6760 - val_accuracy: 0.7848
Epoch 102/200
236/236 [==============================] - 0s 93us/step - loss: 0.2213 - accuracy: 0.9576 - val_loss: 0.6760 - val_accuracy: 0.7848
Epoch 103/200
236/236 [==============================] - 0s 93us/step - loss: 0.2212 - accuracy: 0.9576 - val_loss: 0.6760 - val_accuracy: 0.7848
Epoch 104/200
236/236 [==============================] - 0s 110us/step - loss: 0.2212 - accuracy: 0.9576 - val_loss: 0.6763 - val_accuracy: 0.7848

Epoch 00104: ReduceLROnPlateau reducing learning rate to 1.1718750101863407e-05.
Epoch 105/200
236/236 [==============================] - 0s 102us/step - loss: 0.2211 - accuracy: 0.9576 - val_loss: 0.6764 - val_accuracy: 0.7848
Epoch 106/200
236/236 [==============================] - 0s 106us/step - loss: 0.2211 - accuracy: 0.9576 - val_loss: 0.6765 - val_accuracy: 0.7848
Epoch 107/200
236/236 [==============================] - 0s 110us/step - loss: 0.2211 - accuracy: 0.9576 - val_loss: 0.6765 - val_accuracy: 0.7848
Epoch 108/200
236/236 [==============================] - 0s 97us/step - loss: 0.2210 - accuracy: 0.9576 - val_loss: 0.6765 - val_accuracy: 0.7848
Epoch 109/200
236/236 [==============================] - 0s 97us/step - loss: 0.2210 - accuracy: 0.9576 - val_loss: 0.6765 - val_accuracy: 0.7848
Epoch 110/200
236/236 [==============================] - 0s 102us/step - loss: 0.2210 - accuracy: 0.9576 - val_loss: 0.6765 - val_accuracy: 0.7848
Epoch 111/200
236/236 [==============================] - 0s 106us/step - loss: 0.2209 - accuracy: 0.9576 - val_loss: 0.6765 - val_accuracy: 0.7848
Epoch 112/200
236/236 [==============================] - 0s 93us/step - loss: 0.2209 - accuracy: 0.9576 - val_loss: 0.6765 - val_accuracy: 0.7848
Epoch 113/200
236/236 [==============================] - 0s 106us/step - loss: 0.2209 - accuracy: 0.9576 - val_loss: 0.6765 - val_accuracy: 0.7848
Epoch 114/200
236/236 [==============================] - 0s 89us/step - loss: 0.2208 - accuracy: 0.9576 - val_loss: 0.6764 - val_accuracy: 0.7848

Epoch 00114: ReduceLROnPlateau reducing learning rate to 5.859375050931703e-06.
Epoch 115/200
236/236 [==============================] - 0s 97us/step - loss: 0.2208 - accuracy: 0.9576 - val_loss: 0.6764 - val_accuracy: 0.7848
Epoch 116/200
236/236 [==============================] - 0s 89us/step - loss: 0.2208 - accuracy: 0.9576 - val_loss: 0.6764 - val_accuracy: 0.7848
Epoch 117/200
236/236 [==============================] - 0s 102us/step - loss: 0.2208 - accuracy: 0.9576 - val_loss: 0.6764 - val_accuracy: 0.7848
Epoch 118/200
236/236 [==============================] - 0s 93us/step - loss: 0.2208 - accuracy: 0.9576 - val_loss: 0.6764 - val_accuracy: 0.7848
Epoch 119/200
236/236 [==============================] - 0s 119us/step - loss: 0.2207 - accuracy: 0.9576 - val_loss: 0.6763 - val_accuracy: 0.7848
Epoch 120/200
236/236 [==============================] - 0s 114us/step - loss: 0.2207 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 121/200
236/236 [==============================] - 0s 110us/step - loss: 0.2207 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 122/200
236/236 [==============================] - 0s 106us/step - loss: 0.2207 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 123/200
236/236 [==============================] - 0s 97us/step - loss: 0.2207 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 124/200
236/236 [==============================] - 0s 93us/step - loss: 0.2207 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848

Epoch 00124: ReduceLROnPlateau reducing learning rate to 2.9296875254658516e-06.
Epoch 125/200
236/236 [==============================] - 0s 93us/step - loss: 0.2206 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 126/200
236/236 [==============================] - 0s 89us/step - loss: 0.2206 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 127/200
236/236 [==============================] - 0s 97us/step - loss: 0.2206 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 128/200
236/236 [==============================] - 0s 93us/step - loss: 0.2206 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 129/200
236/236 [==============================] - 0s 102us/step - loss: 0.2206 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 130/200
236/236 [==============================] - 0s 93us/step - loss: 0.2206 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 131/200
236/236 [==============================] - 0s 106us/step - loss: 0.2206 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 132/200
236/236 [==============================] - 0s 97us/step - loss: 0.2206 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 133/200
236/236 [==============================] - 0s 93us/step - loss: 0.2206 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 134/200
236/236 [==============================] - 0s 89us/step - loss: 0.2206 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848

Epoch 00134: ReduceLROnPlateau reducing learning rate to 1.4648437627329258e-06.
Epoch 135/200
236/236 [==============================] - 0s 93us/step - loss: 0.2206 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 136/200
236/236 [==============================] - 0s 97us/step - loss: 0.2206 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 137/200
236/236 [==============================] - 0s 89us/step - loss: 0.2206 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 138/200
236/236 [==============================] - 0s 97us/step - loss: 0.2206 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 139/200
236/236 [==============================] - 0s 89us/step - loss: 0.2206 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 140/200
236/236 [==============================] - 0s 93us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 141/200
236/236 [==============================] - 0s 93us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 142/200
236/236 [==============================] - 0s 93us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 143/200
236/236 [==============================] - 0s 93us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 144/200
236/236 [==============================] - 0s 97us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848

Epoch 00144: ReduceLROnPlateau reducing learning rate to 7.324218813664629e-07.
Epoch 145/200
236/236 [==============================] - 0s 89us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 146/200
236/236 [==============================] - 0s 93us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 147/200
236/236 [==============================] - 0s 85us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 148/200
236/236 [==============================] - 0s 89us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 149/200
236/236 [==============================] - 0s 89us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 150/200
236/236 [==============================] - 0s 89us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 151/200
236/236 [==============================] - 0s 93us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 152/200
236/236 [==============================] - 0s 93us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 153/200
236/236 [==============================] - 0s 93us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 154/200
236/236 [==============================] - 0s 93us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848

Epoch 00154: ReduceLROnPlateau reducing learning rate to 3.6621094068323146e-07.
Epoch 155/200
236/236 [==============================] - 0s 97us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 156/200
236/236 [==============================] - 0s 106us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 157/200
236/236 [==============================] - 0s 106us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 158/200
236/236 [==============================] - 0s 102us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 159/200
236/236 [==============================] - 0s 85us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 160/200
236/236 [==============================] - 0s 102us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 161/200
236/236 [==============================] - 0s 89us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 162/200
236/236 [==============================] - 0s 97us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 163/200
236/236 [==============================] - 0s 89us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 164/200
236/236 [==============================] - 0s 97us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848

Epoch 00164: ReduceLROnPlateau reducing learning rate to 1.8310547034161573e-07.
Epoch 165/200
236/236 [==============================] - 0s 89us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 166/200
236/236 [==============================] - 0s 93us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 167/200
236/236 [==============================] - 0s 85us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 168/200
236/236 [==============================] - 0s 85us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 169/200
236/236 [==============================] - 0s 89us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 170/200
236/236 [==============================] - 0s 85us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 171/200
236/236 [==============================] - 0s 102us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 172/200
236/236 [==============================] - 0s 102us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 173/200
236/236 [==============================] - 0s 89us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 174/200
236/236 [==============================] - ETA: 0s - loss: 0.2723 - accuracy: 0.96 - 0s 89us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848

Epoch 00174: ReduceLROnPlateau reducing learning rate to 9.155273517080786e-08.
Epoch 175/200
236/236 [==============================] - 0s 110us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 176/200
236/236 [==============================] - 0s 102us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 177/200
236/236 [==============================] - 0s 106us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 178/200
236/236 [==============================] - 0s 89us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 179/200
236/236 [==============================] - 0s 102us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 180/200
236/236 [==============================] - 0s 89us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 181/200
236/236 [==============================] - 0s 93us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 182/200
236/236 [==============================] - 0s 102us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 183/200
236/236 [==============================] - 0s 102us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 184/200
236/236 [==============================] - 0s 93us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848

Epoch 00184: ReduceLROnPlateau reducing learning rate to 4.577636758540393e-08.
Epoch 185/200
236/236 [==============================] - 0s 93us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 186/200
236/236 [==============================] - 0s 89us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 187/200
236/236 [==============================] - 0s 97us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 188/200
236/236 [==============================] - 0s 89us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 189/200
236/236 [==============================] - 0s 102us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 190/200
236/236 [==============================] - 0s 89us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 191/200
236/236 [==============================] - 0s 106us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 192/200
236/236 [==============================] - 0s 102us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 193/200
236/236 [==============================] - 0s 106us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 194/200
236/236 [==============================] - 0s 93us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848

Epoch 00194: ReduceLROnPlateau reducing learning rate to 2.2888183792701966e-08.
Epoch 195/200
236/236 [==============================] - 0s 93us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 196/200
236/236 [==============================] - 0s 106us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 197/200
236/236 [==============================] - 0s 123us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 198/200
236/236 [==============================] - 0s 93us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 199/200
236/236 [==============================] - 0s 102us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
Epoch 200/200
236/236 [==============================] - 0s 93us/step - loss: 0.2205 - accuracy: 0.9576 - val_loss: 0.6762 - val_accuracy: 0.7848
In [270]:
# Plot the Keras training history: accuracy and loss curves for the
# training set vs. the validation set, one figure each.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# One point per completed epoch (here 0..199).
epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
range(0, 200)
In [271]:
# Score the trained network on the held-out test set.
# model.evaluate returns [loss, accuracy] in metric-compile order.
test_metrics = model.evaluate(X_test, y_test)
test_loss, test_acc = test_metrics
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
79/79 [==============================] - 0s 89us/step
test loss: 0.6762231539321851, test accuracy: 0.7848101258277893
In [272]:
# Predicted probabilities for the test set; AUC-ROC is threshold-free,
# so the raw scores are passed straight to roc_auc_score.
y_pred = model.predict(X_test)
auc = roc_auc_score(y_test, y_pred)
print("AUC ROC: ", auc)
AUC ROC:  0.6412151067323482
In [273]:
# Binarise the predicted probabilities at the conventional 0.5 cut-off,
# then report chance-corrected agreement (kappa) and the confusion matrix.
y_pred = [int(score >= 0.5) for score in y_pred]
print("Kappa: ", cohen_kappa_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
Kappa:  0.36138849262957684
[[54  4]
 [13  8]]

K-Means clustering of the chromagram features

In [406]:
X
Out[406]:
chromagramfiles_1 chromagramfiles_2 chromagramfiles_3 chromagramfiles_4 chromagramfiles_5 chromagramfiles_6 chromagramfiles_7 chromagramfiles_8 chromagramfiles_9 chromagramfiles_10 chromagramfiles_11 chromagramfiles_12
0 1.713160 0.038243 2.143431 0.466379 2.480688 1.533552 -0.564788 -0.236267 -0.737672 0.470152 -0.935468 0.074931
1 1.315247 0.387677 1.394548 -0.542923 -0.194326 0.100084 0.191568 1.691930 -0.164059 0.599296 -0.175780 0.267042
2 1.737505 -1.440944 -1.114255 0.326790 -1.157732 -1.261521 -1.130503 1.585017 -1.367743 -1.287569 -0.805298 -1.676436
3 1.737506 -1.127088 -0.672464 0.505231 -0.830959 -1.046716 -0.912922 1.082754 -0.919242 -0.761758 -1.023346 -1.220716
4 1.737506 1.455410 0.333584 1.122628 1.139134 -0.481280 0.129896 0.915597 0.333032 -0.289806 -0.236639 -0.281891
5 -1.313694 -1.631596 -0.954634 2.545293 -0.841431 -0.880402 1.098767 0.874224 -1.935272 -1.777115 -1.259928 -1.270550
6 0.343081 -0.554660 -0.305290 0.059946 -0.331585 0.008117 -0.092598 0.086112 1.237321 -0.237146 0.239978 1.617512
7 1.109261 -0.479083 -0.201140 1.561772 -0.314776 2.324993 -0.123793 1.681972 0.723606 1.281028 1.332118 1.616723
8 1.737506 0.431527 -0.310582 -0.122085 -0.401366 1.109393 -0.731551 -0.284884 -0.401724 0.343534 0.512560 2.039796
9 -1.315956 -1.270342 -0.635019 -0.965063 -0.677128 -0.999133 -0.673936 0.377158 -0.740151 0.479661 1.359663 -0.539737
10 1.592092 0.167770 1.676372 0.444937 1.198262 1.558372 0.695811 0.857869 0.417106 1.008119 1.359663 1.013913
11 1.737506 0.061168 -0.198190 -0.277339 -0.315128 -0.260607 -0.037409 0.713144 -0.442008 -0.420659 -0.067628 1.042894
12 1.047772 0.511390 0.649199 -0.733636 -0.679537 -1.048667 -0.618650 -1.049439 1.237321 0.935768 0.609736 1.089900
13 0.985207 0.757573 0.728808 0.148076 -0.402837 -0.657586 -0.341424 0.352814 1.237320 0.537897 -0.314771 1.274813
14 -1.092408 0.724395 -0.854683 -1.339781 -1.038651 -0.146198 -1.046078 -0.968884 -0.640529 -0.662849 1.359664 -0.842083
15 -0.257098 -0.953113 -0.826836 -1.058296 -1.143706 -1.295604 -0.932057 1.298111 0.062365 0.078659 1.359663 -0.414103
16 -1.038548 -1.285038 -0.897336 -0.925038 -1.321623 -0.675496 -1.174686 -0.220940 -1.236805 -0.992367 1.359664 -0.452244
17 -0.743373 -1.006359 -0.304526 -0.673197 2.468788 1.394753 2.061863 -1.147252 -0.314785 -0.803420 -0.543555 2.127294
18 -0.506076 0.745202 0.557874 -0.551711 2.482435 1.618435 3.352869 -0.896426 0.632864 -0.504095 -0.396771 1.961034
19 -0.619836 -0.900602 0.192155 -0.280406 1.602038 1.570403 3.352869 -1.028553 -0.325763 -0.694475 -0.448468 2.112782
20 0.236496 1.024573 2.143431 2.184352 -1.309784 1.967834 -1.174015 -1.738864 -1.705683 -1.658302 1.034240 -1.039738
21 -0.969823 0.186258 -0.649182 -0.822502 -1.173086 -0.203826 -1.106955 -1.095550 0.611256 -1.183571 1.359663 -0.954603
22 -0.477230 -0.147047 -0.472900 -0.603956 -1.185349 0.835564 -0.993918 -1.280801 1.237321 -0.492384 1.208362 -0.579080
23 -0.402673 -1.613310 0.859201 -1.462753 -1.304385 1.048578 -1.175049 -1.320789 -2.133693 0.809224 1.359663 -1.698122
24 1.036252 -0.180424 2.143430 -0.530143 -0.581394 0.192607 0.189292 1.455548 0.372539 1.118521 -0.352066 0.295735
25 1.009571 -0.114262 0.491121 -0.494523 -0.560272 -0.489051 0.310555 -0.762806 0.638941 1.281027 -0.377793 0.160975
26 1.737505 -0.691965 0.794800 -0.996395 -0.859283 0.108625 -0.179964 -0.404455 -0.407132 0.559305 0.490880 -0.690673
27 0.952878 -0.600113 1.564652 -0.647102 -0.354733 0.741931 -0.850897 -0.165104 -1.270161 1.281028 0.689314 -0.739641
28 0.090687 -1.135658 1.025879 0.173394 0.141333 0.938735 -0.460724 1.033120 -0.675812 1.016227 1.359663 -0.918936
29 1.334056 1.036213 0.310885 1.388872 0.536826 1.066414 2.035180 1.368130 1.215360 1.281028 0.571847 1.089988
... ... ... ... ... ... ... ... ... ... ... ... ...
285 -0.818809 0.795687 -0.418744 -0.981876 0.290435 -1.082166 0.916788 -1.121030 -0.872233 -0.467637 -0.673139 2.127294
286 -1.350808 -0.132215 1.939439 -0.255670 1.080433 -0.906077 -0.076483 1.691930 -1.760598 -0.831051 -1.664177 0.190914
287 -0.268283 -1.246560 -0.948278 -1.092787 -0.912773 -1.286685 -1.170304 -1.398130 -0.955533 1.281028 -1.249300 -1.331567
288 -1.499565 -1.477911 -1.400613 -1.454657 -0.189787 -1.235822 -1.124408 -1.277436 -1.382225 1.281028 -1.456827 -1.039202
289 -0.316037 -1.101863 -0.979251 -0.947212 -0.382959 -1.211602 -1.164073 -0.572011 -0.586600 1.281028 -1.035303 -1.071429
290 -1.429079 -0.014357 -0.502745 -0.434442 2.818635 1.078430 2.198959 -1.053548 -2.088193 -1.732987 -1.761028 -0.159791
291 0.142459 0.454640 -1.378454 1.070823 -1.223040 2.764417 1.542144 -1.706716 0.232404 -1.955043 0.868232 -1.674045
292 0.369579 0.015402 -0.629733 2.545293 -0.222337 1.917223 0.555192 -1.194461 0.161682 -1.235773 0.462188 -0.512327
293 1.737506 -0.048004 1.588206 -0.467110 0.510737 0.554321 0.532244 1.152385 -0.138238 0.923415 -0.494020 0.305075
294 -0.072827 -0.808718 2.143431 -1.069392 0.333942 1.075808 -0.574391 0.052452 -1.091144 1.209060 -1.203635 -0.631894
295 1.096294 -0.192606 0.838680 -0.968919 0.814071 1.951969 -0.304336 -0.213012 -0.860743 1.281028 -0.964629 -0.039173
296 1.197861 -0.481339 -0.039784 0.576523 0.719339 1.230426 -0.013807 0.576144 1.237321 0.827248 0.863710 1.347974
297 0.831369 -0.845061 -0.412444 -0.202774 0.499287 0.798141 1.143779 0.135898 0.903542 1.103870 1.359663 1.309758
298 1.129174 -0.778390 -0.347478 0.028754 0.896022 1.021079 1.609960 0.261949 0.607068 1.111758 1.359663 1.623317
299 1.235661 0.246339 -0.055182 -0.274201 -0.453087 -0.448112 -0.693517 -0.103535 1.237321 0.849533 0.778756 0.601407
300 1.617641 0.157580 0.320652 -0.272952 0.399767 0.391633 -0.493105 0.638211 0.910413 1.281028 0.611215 0.781573
301 0.904215 -0.238528 0.650660 -0.588166 0.058915 -0.260010 -0.734161 0.076079 0.334341 1.281028 -0.009255 0.453380
302 -1.571329 2.288385 -0.858389 1.605747 0.877803 -1.392311 -0.299009 -1.732948 0.377403 -1.348885 -1.797949 0.161259
303 -1.111497 1.012712 -0.373804 -0.488606 -0.534590 -1.065890 -0.687799 -1.079827 1.237321 -0.872941 -1.357570 -1.172663
304 -0.632774 2.288385 1.193051 0.583811 1.042355 -0.849096 1.848927 -1.561165 0.223166 -0.146801 -1.172130 -0.154950
305 -1.675063 0.049104 -0.121444 -1.322550 -0.298216 -1.383538 3.352869 -0.922058 -2.012971 -0.306843 -1.798988 0.307357
306 -1.675063 -0.302934 1.841519 -1.463728 0.044986 -1.392689 2.251379 0.179143 -2.131740 1.281028 -1.476204 1.119567
307 -1.675063 -0.345332 0.759301 -1.463728 -0.944610 -1.392689 2.839571 -1.657298 -2.134915 1.281027 -1.798988 -0.818327
308 -0.701240 -0.718707 2.143431 -0.317960 0.753769 0.883849 0.080036 0.067906 -1.106120 0.663348 -0.295994 -0.620494
309 0.283947 -0.770573 2.143431 -0.677678 0.254128 1.306505 -0.471240 1.183162 -1.285117 0.322375 0.125126 -0.725452
310 -0.684797 -0.424879 1.965265 0.285506 1.654633 1.714850 0.379435 0.747071 -0.436120 1.281028 0.814894 -0.455133
311 -0.063614 0.387844 1.083088 -0.000037 0.092909 0.624221 0.267615 1.377100 1.237321 0.720428 0.122085 0.422431
312 1.272215 0.276107 2.143431 -0.015484 1.545799 1.751362 0.646849 1.047746 0.159537 0.294111 -0.397554 0.554605
313 -1.349102 0.408011 -0.212504 2.027495 1.056566 -0.074589 0.348765 1.691929 1.002332 0.337931 0.708780 -0.469953
314 0.377466 -0.342265 -0.366249 -0.135576 -0.759547 1.018877 -0.961553 1.691929 0.534889 0.262599 -0.220736 0.127432

315 rows × 12 columns

In [407]:
# Elbow method: within-cluster sum of squares (KMeans.inertia_) for
# k = 1..14; KMeans.fit returns the fitted estimator, so the calls chain.
WSSs = [
    KMeans(n_clusters=k, random_state=0).fit(X).inertia_
    for k in range(1, 15)
]
WSSs
Out[407]:
[3780.0,
 3195.1235725667257,
 2811.651587927651,
 2629.426866311046,
 2498.0440346782034,
 2372.504055975522,
 2279.167916120524,
 2172.592510139712,
 2092.9155365617485,
 2035.7890606826481,
 1937.9162920732826,
 1905.3053295274888,
 1820.7622703061425,
 1785.824822903035]
In [408]:
# Elbow plot: inertia vs. number of clusters — look for the "knee".
# Labels/title added so the figure stands alone; plt.show() suppresses
# the stray [<matplotlib.lines.Line2D ...>] repr.
plt.figure(figsize=(12, 12))
plt.plot(range(1, 15), WSSs, marker='o')
plt.xlabel('Number of clusters (k)')
plt.ylabel('Within-cluster sum of squares (inertia)')
plt.title('Elbow method for choosing k')
plt.show()
Out[408]:
[<matplotlib.lines.Line2D at 0x1b82d788048>]

Based on the elbow curve above, we choose K = 3 clusters.

In [409]:
# Final clustering with K=3 (chosen from the elbow curve above).
# random_state fixes the centroid initialization; n_init=10 restarts.
kmeans_ch = KMeans(n_clusters=3, random_state=0, n_init=10)
kmeans_ch.fit(X)
Out[409]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=3, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [410]:
# Cluster assignment (0, 1 or 2) of each training sample.
kmeans_ch.labels_
Out[410]:
array([1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 2, 2, 2, 0, 0,
       0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 2, 0, 2, 0, 2, 1, 1, 1, 0, 0, 1,
       0, 0, 0, 0, 1, 2, 2, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 2, 2, 2, 0,
       1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 2,
       2, 1, 0, 1, 0, 2, 2, 2, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1,
       1, 0, 0, 0, 2, 0, 2, 0, 2, 0, 2, 1, 2, 0, 1, 1, 1, 0, 0, 0, 1, 0,
       0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 2, 2, 2, 1, 1, 0, 0, 1,
       1, 1, 2, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0,
       1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 2, 0, 0, 0,
       0, 0, 1, 0, 2, 0, 0, 0, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 1, 0, 0, 0,
       1, 1, 2, 2, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 0, 1, 1, 0, 0,
       1, 1, 1, 0, 1, 1, 2, 2, 0, 1, 1, 0, 1, 1, 1, 0, 1, 2, 0, 0, 0, 0,
       0, 1, 2, 2, 0, 2, 0, 1, 0, 1, 0, 1, 0, 0, 2, 2, 2, 2, 2, 1, 2, 2,
       2, 0, 0, 0, 2, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 2, 2, 2, 2,
       1, 1, 1, 1, 1, 1, 1])
In [411]:
# NOTE(review): predict(X) on the same data the model was fitted on
# reproduces kmeans_ch.labels_ (see identical output above); kept so the
# same call shape works for new data later.
clusters_ch = kmeans_ch.predict(X)
clusters_ch
Out[411]:
array([1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 2, 2, 2, 0, 0,
       0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 2, 0, 2, 0, 2, 1, 1, 1, 0, 0, 1,
       0, 0, 0, 0, 1, 2, 2, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 2, 2, 2, 0,
       1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 2,
       2, 1, 0, 1, 0, 2, 2, 2, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1,
       1, 0, 0, 0, 2, 0, 2, 0, 2, 0, 2, 1, 2, 0, 1, 1, 1, 0, 0, 0, 1, 0,
       0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 2, 2, 2, 1, 1, 0, 0, 1,
       1, 1, 2, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0,
       1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 2, 0, 0, 0,
       0, 0, 1, 0, 2, 0, 0, 0, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2, 1, 0, 0, 0,
       1, 1, 2, 2, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 0, 1, 1, 0, 0,
       1, 1, 1, 0, 1, 1, 2, 2, 0, 1, 1, 0, 1, 1, 1, 0, 1, 2, 0, 0, 0, 0,
       0, 1, 2, 2, 0, 2, 0, 1, 0, 1, 0, 1, 0, 0, 2, 2, 2, 2, 2, 1, 2, 2,
       2, 0, 0, 0, 2, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 2, 2, 2, 2,
       1, 1, 1, 1, 1, 1, 1])
In [412]:
# Attach the cluster id and the original target to X for the
# cross-tabulation below.
# NOTE(review): this mutates X in place — every later cell (and any earlier
# displayed view of X) now sees the two extra columns.
X.loc[:,'Cluster'] = clusters_ch
X.loc[:,'chosen'] = list(y)
In [413]:
# Show the feature matrix with the appended Cluster / chosen columns.
X
Out[413]:
chromagramfiles_1 chromagramfiles_2 chromagramfiles_3 chromagramfiles_4 chromagramfiles_5 chromagramfiles_6 chromagramfiles_7 chromagramfiles_8 chromagramfiles_9 chromagramfiles_10 chromagramfiles_11 chromagramfiles_12 Cluster chosen
0 1.713160 0.038243 2.143431 0.466379 2.480688 1.533552 -0.564788 -0.236267 -0.737672 0.470152 -0.935468 0.074931 1 0
1 1.315247 0.387677 1.394548 -0.542923 -0.194326 0.100084 0.191568 1.691930 -0.164059 0.599296 -0.175780 0.267042 1 0
2 1.737505 -1.440944 -1.114255 0.326790 -1.157732 -1.261521 -1.130503 1.585017 -1.367743 -1.287569 -0.805298 -1.676436 0 0
3 1.737506 -1.127088 -0.672464 0.505231 -0.830959 -1.046716 -0.912922 1.082754 -0.919242 -0.761758 -1.023346 -1.220716 0 0
4 1.737506 1.455410 0.333584 1.122628 1.139134 -0.481280 0.129896 0.915597 0.333032 -0.289806 -0.236639 -0.281891 1 0
5 -1.313694 -1.631596 -0.954634 2.545293 -0.841431 -0.880402 1.098767 0.874224 -1.935272 -1.777115 -1.259928 -1.270550 0 0
6 0.343081 -0.554660 -0.305290 0.059946 -0.331585 0.008117 -0.092598 0.086112 1.237321 -0.237146 0.239978 1.617512 1 0
7 1.109261 -0.479083 -0.201140 1.561772 -0.314776 2.324993 -0.123793 1.681972 0.723606 1.281028 1.332118 1.616723 1 0
8 1.737506 0.431527 -0.310582 -0.122085 -0.401366 1.109393 -0.731551 -0.284884 -0.401724 0.343534 0.512560 2.039796 1 0
9 -1.315956 -1.270342 -0.635019 -0.965063 -0.677128 -0.999133 -0.673936 0.377158 -0.740151 0.479661 1.359663 -0.539737 0 0
10 1.592092 0.167770 1.676372 0.444937 1.198262 1.558372 0.695811 0.857869 0.417106 1.008119 1.359663 1.013913 1 0
11 1.737506 0.061168 -0.198190 -0.277339 -0.315128 -0.260607 -0.037409 0.713144 -0.442008 -0.420659 -0.067628 1.042894 1 0
12 1.047772 0.511390 0.649199 -0.733636 -0.679537 -1.048667 -0.618650 -1.049439 1.237321 0.935768 0.609736 1.089900 1 0
13 0.985207 0.757573 0.728808 0.148076 -0.402837 -0.657586 -0.341424 0.352814 1.237320 0.537897 -0.314771 1.274813 1 0
14 -1.092408 0.724395 -0.854683 -1.339781 -1.038651 -0.146198 -1.046078 -0.968884 -0.640529 -0.662849 1.359664 -0.842083 0 0
15 -0.257098 -0.953113 -0.826836 -1.058296 -1.143706 -1.295604 -0.932057 1.298111 0.062365 0.078659 1.359663 -0.414103 0 0
16 -1.038548 -1.285038 -0.897336 -0.925038 -1.321623 -0.675496 -1.174686 -0.220940 -1.236805 -0.992367 1.359664 -0.452244 0 0
17 -0.743373 -1.006359 -0.304526 -0.673197 2.468788 1.394753 2.061863 -1.147252 -0.314785 -0.803420 -0.543555 2.127294 2 0
18 -0.506076 0.745202 0.557874 -0.551711 2.482435 1.618435 3.352869 -0.896426 0.632864 -0.504095 -0.396771 1.961034 2 0
19 -0.619836 -0.900602 0.192155 -0.280406 1.602038 1.570403 3.352869 -1.028553 -0.325763 -0.694475 -0.448468 2.112782 2 0
20 0.236496 1.024573 2.143431 2.184352 -1.309784 1.967834 -1.174015 -1.738864 -1.705683 -1.658302 1.034240 -1.039738 0 0
21 -0.969823 0.186258 -0.649182 -0.822502 -1.173086 -0.203826 -1.106955 -1.095550 0.611256 -1.183571 1.359663 -0.954603 0 0
22 -0.477230 -0.147047 -0.472900 -0.603956 -1.185349 0.835564 -0.993918 -1.280801 1.237321 -0.492384 1.208362 -0.579080 0 0
23 -0.402673 -1.613310 0.859201 -1.462753 -1.304385 1.048578 -1.175049 -1.320789 -2.133693 0.809224 1.359663 -1.698122 0 0
24 1.036252 -0.180424 2.143430 -0.530143 -0.581394 0.192607 0.189292 1.455548 0.372539 1.118521 -0.352066 0.295735 1 0
25 1.009571 -0.114262 0.491121 -0.494523 -0.560272 -0.489051 0.310555 -0.762806 0.638941 1.281027 -0.377793 0.160975 1 0
26 1.737505 -0.691965 0.794800 -0.996395 -0.859283 0.108625 -0.179964 -0.404455 -0.407132 0.559305 0.490880 -0.690673 0 0
27 0.952878 -0.600113 1.564652 -0.647102 -0.354733 0.741931 -0.850897 -0.165104 -1.270161 1.281028 0.689314 -0.739641 1 0
28 0.090687 -1.135658 1.025879 0.173394 0.141333 0.938735 -0.460724 1.033120 -0.675812 1.016227 1.359663 -0.918936 1 0
29 1.334056 1.036213 0.310885 1.388872 0.536826 1.066414 2.035180 1.368130 1.215360 1.281028 0.571847 1.089988 1 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
285 -0.818809 0.795687 -0.418744 -0.981876 0.290435 -1.082166 0.916788 -1.121030 -0.872233 -0.467637 -0.673139 2.127294 2 1
286 -1.350808 -0.132215 1.939439 -0.255670 1.080433 -0.906077 -0.076483 1.691930 -1.760598 -0.831051 -1.664177 0.190914 2 1
287 -0.268283 -1.246560 -0.948278 -1.092787 -0.912773 -1.286685 -1.170304 -1.398130 -0.955533 1.281028 -1.249300 -1.331567 0 1
288 -1.499565 -1.477911 -1.400613 -1.454657 -0.189787 -1.235822 -1.124408 -1.277436 -1.382225 1.281028 -1.456827 -1.039202 0 1
289 -0.316037 -1.101863 -0.979251 -0.947212 -0.382959 -1.211602 -1.164073 -0.572011 -0.586600 1.281028 -1.035303 -1.071429 0 1
290 -1.429079 -0.014357 -0.502745 -0.434442 2.818635 1.078430 2.198959 -1.053548 -2.088193 -1.732987 -1.761028 -0.159791 2 1
291 0.142459 0.454640 -1.378454 1.070823 -1.223040 2.764417 1.542144 -1.706716 0.232404 -1.955043 0.868232 -1.674045 0 1
292 0.369579 0.015402 -0.629733 2.545293 -0.222337 1.917223 0.555192 -1.194461 0.161682 -1.235773 0.462188 -0.512327 1 1
293 1.737506 -0.048004 1.588206 -0.467110 0.510737 0.554321 0.532244 1.152385 -0.138238 0.923415 -0.494020 0.305075 1 1
294 -0.072827 -0.808718 2.143431 -1.069392 0.333942 1.075808 -0.574391 0.052452 -1.091144 1.209060 -1.203635 -0.631894 1 1
295 1.096294 -0.192606 0.838680 -0.968919 0.814071 1.951969 -0.304336 -0.213012 -0.860743 1.281028 -0.964629 -0.039173 1 1
296 1.197861 -0.481339 -0.039784 0.576523 0.719339 1.230426 -0.013807 0.576144 1.237321 0.827248 0.863710 1.347974 1 1
297 0.831369 -0.845061 -0.412444 -0.202774 0.499287 0.798141 1.143779 0.135898 0.903542 1.103870 1.359663 1.309758 1 1
298 1.129174 -0.778390 -0.347478 0.028754 0.896022 1.021079 1.609960 0.261949 0.607068 1.111758 1.359663 1.623317 1 1
299 1.235661 0.246339 -0.055182 -0.274201 -0.453087 -0.448112 -0.693517 -0.103535 1.237321 0.849533 0.778756 0.601407 1 1
300 1.617641 0.157580 0.320652 -0.272952 0.399767 0.391633 -0.493105 0.638211 0.910413 1.281028 0.611215 0.781573 1 1
301 0.904215 -0.238528 0.650660 -0.588166 0.058915 -0.260010 -0.734161 0.076079 0.334341 1.281028 -0.009255 0.453380 1 1
302 -1.571329 2.288385 -0.858389 1.605747 0.877803 -1.392311 -0.299009 -1.732948 0.377403 -1.348885 -1.797949 0.161259 2 1
303 -1.111497 1.012712 -0.373804 -0.488606 -0.534590 -1.065890 -0.687799 -1.079827 1.237321 -0.872941 -1.357570 -1.172663 0 1
304 -0.632774 2.288385 1.193051 0.583811 1.042355 -0.849096 1.848927 -1.561165 0.223166 -0.146801 -1.172130 -0.154950 2 1
305 -1.675063 0.049104 -0.121444 -1.322550 -0.298216 -1.383538 3.352869 -0.922058 -2.012971 -0.306843 -1.798988 0.307357 2 1
306 -1.675063 -0.302934 1.841519 -1.463728 0.044986 -1.392689 2.251379 0.179143 -2.131740 1.281028 -1.476204 1.119567 2 1
307 -1.675063 -0.345332 0.759301 -1.463728 -0.944610 -1.392689 2.839571 -1.657298 -2.134915 1.281027 -1.798988 -0.818327 2 1
308 -0.701240 -0.718707 2.143431 -0.317960 0.753769 0.883849 0.080036 0.067906 -1.106120 0.663348 -0.295994 -0.620494 1 1
309 0.283947 -0.770573 2.143431 -0.677678 0.254128 1.306505 -0.471240 1.183162 -1.285117 0.322375 0.125126 -0.725452 1 1
310 -0.684797 -0.424879 1.965265 0.285506 1.654633 1.714850 0.379435 0.747071 -0.436120 1.281028 0.814894 -0.455133 1 1
311 -0.063614 0.387844 1.083088 -0.000037 0.092909 0.624221 0.267615 1.377100 1.237321 0.720428 0.122085 0.422431 1 1
312 1.272215 0.276107 2.143431 -0.015484 1.545799 1.751362 0.646849 1.047746 0.159537 0.294111 -0.397554 0.554605 1 1
313 -1.349102 0.408011 -0.212504 2.027495 1.056566 -0.074589 0.348765 1.691929 1.002332 0.337931 0.708780 -0.469953 1 1
314 0.377466 -0.342265 -0.366249 -0.135576 -0.759547 1.018877 -0.961553 1.691929 0.534889 0.262599 -0.220736 0.127432 1 1

315 rows × 14 columns

In [414]:
# Count songs per (chosen, Cluster) pair, pivot to Cluster x chosen,
# and draw a stacked bar chart: how the clusters split across the two
# values of 'chosen'.
stacked = X.groupby(['chosen','Cluster']).size().reset_index()
# values=0: the size() column gets the integer name 0 after reset_index.
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(10,7))
Out[414]:
<matplotlib.axes._subplots.AxesSubplot at 0x1b82f796be0>
In [415]:
# Render the company name as a level-2 Markdown header.
# NOTE(review): Latex is imported but unused in this cell.
from IPython.display import display, Markdown, Latex
display(Markdown('## '+companies[1]))

Club De Banqueros y Empresarios

ANN (artificial neural network) model

In [274]:
# Feature matrix for company index 1 — presumably the standardized
# chromagram features (the _std_ suffix); confirm against the upstream cell.
X = df_n_ps_std_ch[1]
In [275]:
# Target: the 'chosen' column (0/1 in the displayed data) for company 1.
y = df_n_ps[1]['chosen']
In [276]:
# Hold out a test set (default 25%). random_state pins the split so a
# Restart-&-Run-All reproduces the same train/test partition — the original
# call had no seed, so every rerun trained on a different split.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
In [277]:
# Sanity check: training-set dimensions (samples, features).
X_train.shape
Out[277]:
(191, 12)
In [278]:
# Base estimator for the grid search; hidden_layer_sizes here is just a
# placeholder — the search below tunes it along with the other parameters.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [279]:
# Candidate hyperparameter values for the MLP grid search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [
    (10,), (20,), (30,),                      # one hidden layer
    (10, 10), (20, 20), (30, 30), (20, 10),   # two hidden layers
    (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10),  # three
]
# 0.001 .. 0.009 in steps of 0.001, then 0.01 and 0.02.
learning_rate_init_vec = [k / 1000 for k in range(1, 10)] + [0.01, 0.02]
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [280]:
import time
start = time.time() # wall-clock reference point, used to time the grid search below

np.random.seed(1234)
# Search space built from the candidate lists defined in the previous cell.
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec  # excluded to keep the search tractable
              }
# Score with both Cohen's kappa and accuracy; refit the best model on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): the `iid` parameter is deprecated and was removed in
# scikit-learn 0.24 — this call only works on older versions; confirm the
# environment's pinned sklearn version.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [423]:
# Exhaustive search over the parameter grid (slow: ~26 min on the recorded run).
grid.fit(X_train, y_train)

# Report best params with their CV accuracy and kappa (user-facing message in Spanish).
print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # time after the model search finished
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'tanh', 'hidden_layer_sizes': (10, 10), 'learning_rate_init': 0.003, 'max_iter': 200}, que permiten obtener un Accuracy de 76.96% y un Kappa del 34.08
Tiempo total: 25.99 minutos
In [281]:
# Re-seed grid.best_params_ by hand (values copied from the recorded search
# output) so the cells below can run without repeating the ~26-minute search.
grid.best_params_ = {'activation': 'tanh', 'hidden_layer_sizes': (10, 10), 'learning_rate_init': 0.003, 'max_iter': 200}

n0 = X_train.shape[1]  # input width = number of features

# Hidden-layer widths from the tuned configuration, plus one output unit.
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)

lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [282]:
# Keras functional-API input layer sized to the feature count.
input_tensor = Input(shape = (n0,))
In [283]:
# Build the hidden stack one layer at a time; hidden_outputs[k] is the output
# tensor after k hidden layers (index 0 is the raw input tensor).
hidden_outputs = [input_tensor]
for width in ns[:-1]:
    hidden_outputs.append(
        Dense(width, activation=grid.best_params_['activation'])(hidden_outputs[-1])
    )

# Single sigmoid unit (ns[-1] == 1) for binary classification.
classification_output = Dense(ns[-1], activation='sigmoid')(hidden_outputs[-1])
In [284]:
# Assemble the model and snapshot its freshly-initialized weights so the
# same initialization can be restored just before training (two cells below).
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [285]:
# Architecture summary: 12 -> 10 -> 10 -> 1, 251 trainable parameters.
model.summary()
Model: "model_16"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_17 (InputLayer)        (None, 12)                0         
_________________________________________________________________
dense_46 (Dense)             (None, 10)                130       
_________________________________________________________________
dense_47 (Dense)             (None, 10)                110       
_________________________________________________________________
dense_48 (Dense)             (None, 1)                 11        
=================================================================
Total params: 251
Trainable params: 251
Non-trainable params: 0
_________________________________________________________________
In [286]:
# Restore the saved initial weights so training always starts from the same point.
model.set_weights(weights)
# Adam with the learning rate chosen by the grid search.
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Train for the grid-chosen epoch budget; halve the learning rate whenever
# validation accuracy fails to improve by 0.01 for 10 epochs.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), 
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 191 samples, validate on 64 samples
Epoch 1/200
191/191 [==============================] - 0s 1ms/step - loss: 0.7087 - accuracy: 0.5288 - val_loss: 0.7177 - val_accuracy: 0.5000
Epoch 2/200
191/191 [==============================] - 0s 84us/step - loss: 0.6727 - accuracy: 0.5916 - val_loss: 0.6935 - val_accuracy: 0.5312
Epoch 3/200
191/191 [==============================] - 0s 84us/step - loss: 0.6477 - accuracy: 0.6440 - val_loss: 0.6732 - val_accuracy: 0.5469
Epoch 4/200
191/191 [==============================] - 0s 89us/step - loss: 0.6268 - accuracy: 0.6859 - val_loss: 0.6566 - val_accuracy: 0.5781
Epoch 5/200
191/191 [==============================] - 0s 89us/step - loss: 0.6068 - accuracy: 0.7068 - val_loss: 0.6425 - val_accuracy: 0.6094
Epoch 6/200
191/191 [==============================] - 0s 89us/step - loss: 0.5911 - accuracy: 0.7173 - val_loss: 0.6319 - val_accuracy: 0.6094
Epoch 7/200
191/191 [==============================] - 0s 89us/step - loss: 0.5783 - accuracy: 0.7225 - val_loss: 0.6250 - val_accuracy: 0.6250
Epoch 8/200
191/191 [==============================] - 0s 84us/step - loss: 0.5665 - accuracy: 0.7277 - val_loss: 0.6181 - val_accuracy: 0.6406
Epoch 9/200
191/191 [==============================] - 0s 78us/step - loss: 0.5562 - accuracy: 0.7277 - val_loss: 0.6133 - val_accuracy: 0.6250
Epoch 10/200
191/191 [==============================] - 0s 84us/step - loss: 0.5488 - accuracy: 0.7435 - val_loss: 0.6092 - val_accuracy: 0.6406
Epoch 11/200
191/191 [==============================] - 0s 94us/step - loss: 0.5408 - accuracy: 0.7487 - val_loss: 0.6059 - val_accuracy: 0.6406
Epoch 12/200
191/191 [==============================] - 0s 89us/step - loss: 0.5354 - accuracy: 0.7435 - val_loss: 0.6040 - val_accuracy: 0.6406
Epoch 13/200
191/191 [==============================] - 0s 94us/step - loss: 0.5295 - accuracy: 0.7435 - val_loss: 0.6033 - val_accuracy: 0.6406
Epoch 14/200
191/191 [==============================] - 0s 99us/step - loss: 0.5244 - accuracy: 0.7487 - val_loss: 0.6023 - val_accuracy: 0.6406
Epoch 15/200
191/191 [==============================] - 0s 89us/step - loss: 0.5197 - accuracy: 0.7487 - val_loss: 0.6011 - val_accuracy: 0.6406
Epoch 16/200
191/191 [==============================] - 0s 94us/step - loss: 0.5159 - accuracy: 0.7487 - val_loss: 0.6002 - val_accuracy: 0.6406
Epoch 17/200
191/191 [==============================] - 0s 84us/step - loss: 0.5119 - accuracy: 0.7487 - val_loss: 0.6004 - val_accuracy: 0.6562
Epoch 18/200
191/191 [==============================] - 0s 84us/step - loss: 0.5089 - accuracy: 0.7539 - val_loss: 0.6029 - val_accuracy: 0.6562
Epoch 19/200
191/191 [==============================] - 0s 89us/step - loss: 0.5054 - accuracy: 0.7487 - val_loss: 0.6033 - val_accuracy: 0.6562
Epoch 20/200
191/191 [==============================] - 0s 94us/step - loss: 0.5018 - accuracy: 0.7487 - val_loss: 0.6033 - val_accuracy: 0.6562
Epoch 21/200
191/191 [==============================] - 0s 89us/step - loss: 0.4993 - accuracy: 0.7487 - val_loss: 0.6037 - val_accuracy: 0.6562
Epoch 22/200
191/191 [==============================] - 0s 84us/step - loss: 0.4957 - accuracy: 0.7487 - val_loss: 0.6056 - val_accuracy: 0.6562
Epoch 23/200
191/191 [==============================] - 0s 105us/step - loss: 0.4934 - accuracy: 0.7539 - val_loss: 0.6100 - val_accuracy: 0.6719
Epoch 24/200
191/191 [==============================] - 0s 99us/step - loss: 0.4887 - accuracy: 0.7487 - val_loss: 0.6113 - val_accuracy: 0.6719
Epoch 25/200
191/191 [==============================] - 0s 89us/step - loss: 0.4866 - accuracy: 0.7435 - val_loss: 0.6083 - val_accuracy: 0.6719
Epoch 26/200
191/191 [==============================] - 0s 84us/step - loss: 0.4831 - accuracy: 0.7435 - val_loss: 0.6124 - val_accuracy: 0.6719
Epoch 27/200
191/191 [==============================] - 0s 89us/step - loss: 0.4802 - accuracy: 0.7487 - val_loss: 0.6134 - val_accuracy: 0.6719
Epoch 28/200
191/191 [==============================] - 0s 89us/step - loss: 0.4769 - accuracy: 0.7487 - val_loss: 0.6106 - val_accuracy: 0.6719
Epoch 29/200
191/191 [==============================] - 0s 84us/step - loss: 0.4738 - accuracy: 0.7539 - val_loss: 0.6102 - val_accuracy: 0.6719
Epoch 30/200
191/191 [==============================] - 0s 89us/step - loss: 0.4712 - accuracy: 0.7539 - val_loss: 0.6098 - val_accuracy: 0.6719
Epoch 31/200
191/191 [==============================] - 0s 89us/step - loss: 0.4673 - accuracy: 0.7487 - val_loss: 0.6118 - val_accuracy: 0.6875
Epoch 32/200
191/191 [==============================] - 0s 89us/step - loss: 0.4650 - accuracy: 0.7487 - val_loss: 0.6131 - val_accuracy: 0.6875
Epoch 33/200
191/191 [==============================] - 0s 99us/step - loss: 0.4620 - accuracy: 0.7592 - val_loss: 0.6143 - val_accuracy: 0.6875
Epoch 34/200
191/191 [==============================] - 0s 84us/step - loss: 0.4588 - accuracy: 0.7592 - val_loss: 0.6144 - val_accuracy: 0.6875
Epoch 35/200
191/191 [==============================] - 0s 94us/step - loss: 0.4564 - accuracy: 0.7592 - val_loss: 0.6145 - val_accuracy: 0.6875
Epoch 36/200
191/191 [==============================] - 0s 89us/step - loss: 0.4530 - accuracy: 0.7644 - val_loss: 0.6145 - val_accuracy: 0.6875
Epoch 37/200
191/191 [==============================] - 0s 94us/step - loss: 0.4513 - accuracy: 0.7592 - val_loss: 0.6170 - val_accuracy: 0.6875
Epoch 38/200
191/191 [==============================] - 0s 105us/step - loss: 0.4475 - accuracy: 0.7592 - val_loss: 0.6191 - val_accuracy: 0.6875
Epoch 39/200
191/191 [==============================] - 0s 120us/step - loss: 0.4449 - accuracy: 0.7644 - val_loss: 0.6166 - val_accuracy: 0.6875
Epoch 40/200
191/191 [==============================] - 0s 94us/step - loss: 0.4427 - accuracy: 0.7696 - val_loss: 0.6174 - val_accuracy: 0.6875
Epoch 41/200
191/191 [==============================] - 0s 89us/step - loss: 0.4406 - accuracy: 0.7749 - val_loss: 0.6191 - val_accuracy: 0.6875

Epoch 00041: ReduceLROnPlateau reducing learning rate to 0.001500000013038516.
Epoch 42/200
191/191 [==============================] - 0s 89us/step - loss: 0.4361 - accuracy: 0.7749 - val_loss: 0.6193 - val_accuracy: 0.6875
Epoch 43/200
191/191 [==============================] - 0s 89us/step - loss: 0.4351 - accuracy: 0.7749 - val_loss: 0.6197 - val_accuracy: 0.6875
Epoch 44/200
191/191 [==============================] - 0s 105us/step - loss: 0.4332 - accuracy: 0.7749 - val_loss: 0.6205 - val_accuracy: 0.6875
Epoch 45/200
191/191 [==============================] - 0s 94us/step - loss: 0.4318 - accuracy: 0.7749 - val_loss: 0.6214 - val_accuracy: 0.6875
Epoch 46/200
191/191 [==============================] - 0s 94us/step - loss: 0.4304 - accuracy: 0.7749 - val_loss: 0.6216 - val_accuracy: 0.6875
Epoch 47/200
191/191 [==============================] - 0s 99us/step - loss: 0.4291 - accuracy: 0.7696 - val_loss: 0.6220 - val_accuracy: 0.6875
Epoch 48/200
191/191 [==============================] - 0s 94us/step - loss: 0.4274 - accuracy: 0.7696 - val_loss: 0.6221 - val_accuracy: 0.6875
Epoch 49/200
191/191 [==============================] - 0s 99us/step - loss: 0.4264 - accuracy: 0.7749 - val_loss: 0.6231 - val_accuracy: 0.6875
Epoch 50/200
191/191 [==============================] - 0s 99us/step - loss: 0.4249 - accuracy: 0.7801 - val_loss: 0.6225 - val_accuracy: 0.6875
Epoch 51/200
191/191 [==============================] - 0s 94us/step - loss: 0.4231 - accuracy: 0.7801 - val_loss: 0.6233 - val_accuracy: 0.6875

Epoch 00051: ReduceLROnPlateau reducing learning rate to 0.000750000006519258.
Epoch 52/200
191/191 [==============================] - 0s 94us/step - loss: 0.4219 - accuracy: 0.7801 - val_loss: 0.6237 - val_accuracy: 0.6875
Epoch 53/200
191/191 [==============================] - 0s 89us/step - loss: 0.4209 - accuracy: 0.7801 - val_loss: 0.6246 - val_accuracy: 0.6875
Epoch 54/200
191/191 [==============================] - 0s 94us/step - loss: 0.4205 - accuracy: 0.7853 - val_loss: 0.6255 - val_accuracy: 0.6875
Epoch 55/200
191/191 [==============================] - 0s 94us/step - loss: 0.4197 - accuracy: 0.7853 - val_loss: 0.6260 - val_accuracy: 0.6875
Epoch 56/200
191/191 [==============================] - 0s 89us/step - loss: 0.4189 - accuracy: 0.7853 - val_loss: 0.6265 - val_accuracy: 0.6875
Epoch 57/200
191/191 [==============================] - 0s 89us/step - loss: 0.4183 - accuracy: 0.7906 - val_loss: 0.6265 - val_accuracy: 0.6875
Epoch 58/200
191/191 [==============================] - 0s 94us/step - loss: 0.4177 - accuracy: 0.7906 - val_loss: 0.6259 - val_accuracy: 0.6875
Epoch 59/200
191/191 [==============================] - 0s 84us/step - loss: 0.4166 - accuracy: 0.7906 - val_loss: 0.6263 - val_accuracy: 0.6875
Epoch 60/200
191/191 [==============================] - 0s 84us/step - loss: 0.4163 - accuracy: 0.7906 - val_loss: 0.6273 - val_accuracy: 0.6875
Epoch 61/200
191/191 [==============================] - 0s 105us/step - loss: 0.4153 - accuracy: 0.7853 - val_loss: 0.6279 - val_accuracy: 0.6875

Epoch 00061: ReduceLROnPlateau reducing learning rate to 0.000375000003259629.
Epoch 62/200
191/191 [==============================] - 0s 94us/step - loss: 0.4146 - accuracy: 0.7906 - val_loss: 0.6280 - val_accuracy: 0.6875
Epoch 63/200
191/191 [==============================] - 0s 89us/step - loss: 0.4142 - accuracy: 0.7906 - val_loss: 0.6279 - val_accuracy: 0.6875
Epoch 64/200
191/191 [==============================] - 0s 84us/step - loss: 0.4139 - accuracy: 0.7906 - val_loss: 0.6278 - val_accuracy: 0.6875
Epoch 65/200
191/191 [==============================] - 0s 84us/step - loss: 0.4136 - accuracy: 0.7906 - val_loss: 0.6280 - val_accuracy: 0.6875
Epoch 66/200
191/191 [==============================] - 0s 89us/step - loss: 0.4131 - accuracy: 0.7906 - val_loss: 0.6283 - val_accuracy: 0.6875
Epoch 67/200
191/191 [==============================] - 0s 94us/step - loss: 0.4128 - accuracy: 0.7906 - val_loss: 0.6285 - val_accuracy: 0.6875
Epoch 68/200
191/191 [==============================] - 0s 84us/step - loss: 0.4125 - accuracy: 0.7906 - val_loss: 0.6286 - val_accuracy: 0.6875
Epoch 69/200
191/191 [==============================] - 0s 89us/step - loss: 0.4120 - accuracy: 0.7906 - val_loss: 0.6286 - val_accuracy: 0.6875
Epoch 70/200
191/191 [==============================] - 0s 84us/step - loss: 0.4117 - accuracy: 0.7906 - val_loss: 0.6288 - val_accuracy: 0.6875
Epoch 71/200
191/191 [==============================] - 0s 99us/step - loss: 0.4113 - accuracy: 0.7906 - val_loss: 0.6289 - val_accuracy: 0.6875

Epoch 00071: ReduceLROnPlateau reducing learning rate to 0.0001875000016298145.
Epoch 72/200
191/191 [==============================] - 0s 126us/step - loss: 0.4110 - accuracy: 0.7906 - val_loss: 0.6290 - val_accuracy: 0.6875
Epoch 73/200
191/191 [==============================] - 0s 110us/step - loss: 0.4108 - accuracy: 0.7906 - val_loss: 0.6289 - val_accuracy: 0.6875
Epoch 74/200
191/191 [==============================] - 0s 105us/step - loss: 0.4107 - accuracy: 0.7906 - val_loss: 0.6291 - val_accuracy: 0.6875
Epoch 75/200
191/191 [==============================] - 0s 94us/step - loss: 0.4105 - accuracy: 0.7906 - val_loss: 0.6291 - val_accuracy: 0.6875
Epoch 76/200
191/191 [==============================] - 0s 94us/step - loss: 0.4103 - accuracy: 0.7906 - val_loss: 0.6291 - val_accuracy: 0.6875
Epoch 77/200
191/191 [==============================] - 0s 89us/step - loss: 0.4101 - accuracy: 0.7906 - val_loss: 0.6294 - val_accuracy: 0.6875
Epoch 78/200
191/191 [==============================] - 0s 89us/step - loss: 0.4100 - accuracy: 0.7906 - val_loss: 0.6295 - val_accuracy: 0.6875
Epoch 79/200
191/191 [==============================] - 0s 84us/step - loss: 0.4098 - accuracy: 0.7906 - val_loss: 0.6295 - val_accuracy: 0.6875
Epoch 80/200
191/191 [==============================] - 0s 99us/step - loss: 0.4096 - accuracy: 0.7906 - val_loss: 0.6297 - val_accuracy: 0.6875
Epoch 81/200
191/191 [==============================] - 0s 89us/step - loss: 0.4095 - accuracy: 0.7906 - val_loss: 0.6297 - val_accuracy: 0.6875

Epoch 00081: ReduceLROnPlateau reducing learning rate to 9.375000081490725e-05.
Epoch 82/200
191/191 [==============================] - 0s 84us/step - loss: 0.4093 - accuracy: 0.7906 - val_loss: 0.6297 - val_accuracy: 0.6875
Epoch 83/200
191/191 [==============================] - 0s 94us/step - loss: 0.4092 - accuracy: 0.7906 - val_loss: 0.6298 - val_accuracy: 0.6875
Epoch 84/200
191/191 [==============================] - 0s 94us/step - loss: 0.4091 - accuracy: 0.7906 - val_loss: 0.6298 - val_accuracy: 0.6875
Epoch 85/200
191/191 [==============================] - 0s 89us/step - loss: 0.4090 - accuracy: 0.7906 - val_loss: 0.6298 - val_accuracy: 0.6875
Epoch 86/200
191/191 [==============================] - 0s 84us/step - loss: 0.4089 - accuracy: 0.7906 - val_loss: 0.6299 - val_accuracy: 0.6875
Epoch 87/200
191/191 [==============================] - 0s 94us/step - loss: 0.4088 - accuracy: 0.7906 - val_loss: 0.6300 - val_accuracy: 0.6875
Epoch 88/200
191/191 [==============================] - 0s 89us/step - loss: 0.4087 - accuracy: 0.7906 - val_loss: 0.6299 - val_accuracy: 0.6875
Epoch 89/200
191/191 [==============================] - 0s 84us/step - loss: 0.4087 - accuracy: 0.7906 - val_loss: 0.6300 - val_accuracy: 0.6875
Epoch 90/200
191/191 [==============================] - 0s 89us/step - loss: 0.4086 - accuracy: 0.7906 - val_loss: 0.6301 - val_accuracy: 0.6875
Epoch 91/200
191/191 [==============================] - 0s 94us/step - loss: 0.4085 - accuracy: 0.7906 - val_loss: 0.6302 - val_accuracy: 0.6875

Epoch 00091: ReduceLROnPlateau reducing learning rate to 4.6875000407453626e-05.
Epoch 92/200
191/191 [==============================] - 0s 94us/step - loss: 0.4084 - accuracy: 0.7906 - val_loss: 0.6302 - val_accuracy: 0.6875
Epoch 93/200
191/191 [==============================] - 0s 89us/step - loss: 0.4083 - accuracy: 0.7906 - val_loss: 0.6302 - val_accuracy: 0.6875
Epoch 94/200
191/191 [==============================] - 0s 94us/step - loss: 0.4083 - accuracy: 0.7906 - val_loss: 0.6302 - val_accuracy: 0.6875
Epoch 95/200
191/191 [==============================] - 0s 99us/step - loss: 0.4082 - accuracy: 0.7906 - val_loss: 0.6302 - val_accuracy: 0.6875
Epoch 96/200
191/191 [==============================] - 0s 105us/step - loss: 0.4082 - accuracy: 0.7906 - val_loss: 0.6303 - val_accuracy: 0.6875
Epoch 97/200
191/191 [==============================] - 0s 105us/step - loss: 0.4082 - accuracy: 0.7906 - val_loss: 0.6303 - val_accuracy: 0.6875
Epoch 98/200
191/191 [==============================] - 0s 99us/step - loss: 0.4081 - accuracy: 0.7906 - val_loss: 0.6303 - val_accuracy: 0.6875
Epoch 99/200
191/191 [==============================] - 0s 99us/step - loss: 0.4081 - accuracy: 0.7906 - val_loss: 0.6303 - val_accuracy: 0.6875
Epoch 100/200
191/191 [==============================] - 0s 105us/step - loss: 0.4080 - accuracy: 0.7906 - val_loss: 0.6303 - val_accuracy: 0.6875
Epoch 101/200
191/191 [==============================] - 0s 84us/step - loss: 0.4080 - accuracy: 0.7906 - val_loss: 0.6303 - val_accuracy: 0.6875

Epoch 00101: ReduceLROnPlateau reducing learning rate to 2.3437500203726813e-05.
Epoch 102/200
191/191 [==============================] - 0s 89us/step - loss: 0.4080 - accuracy: 0.7906 - val_loss: 0.6303 - val_accuracy: 0.6875
Epoch 103/200
191/191 [==============================] - 0s 94us/step - loss: 0.4079 - accuracy: 0.7906 - val_loss: 0.6304 - val_accuracy: 0.6875
Epoch 104/200
191/191 [==============================] - 0s 94us/step - loss: 0.4079 - accuracy: 0.7906 - val_loss: 0.6304 - val_accuracy: 0.6875
Epoch 105/200
191/191 [==============================] - 0s 110us/step - loss: 0.4079 - accuracy: 0.7906 - val_loss: 0.6304 - val_accuracy: 0.6875
Epoch 106/200
191/191 [==============================] - 0s 89us/step - loss: 0.4079 - accuracy: 0.7906 - val_loss: 0.6304 - val_accuracy: 0.6875
Epoch 107/200
191/191 [==============================] - 0s 99us/step - loss: 0.4078 - accuracy: 0.7906 - val_loss: 0.6304 - val_accuracy: 0.6875
Epoch 108/200
191/191 [==============================] - 0s 89us/step - loss: 0.4078 - accuracy: 0.7906 - val_loss: 0.6304 - val_accuracy: 0.6875
Epoch 109/200
191/191 [==============================] - 0s 115us/step - loss: 0.4078 - accuracy: 0.7906 - val_loss: 0.6304 - val_accuracy: 0.6875
Epoch 110/200
191/191 [==============================] - 0s 105us/step - loss: 0.4078 - accuracy: 0.7906 - val_loss: 0.6304 - val_accuracy: 0.6875
Epoch 111/200
191/191 [==============================] - 0s 136us/step - loss: 0.4078 - accuracy: 0.7906 - val_loss: 0.6304 - val_accuracy: 0.6875

Epoch 00111: ReduceLROnPlateau reducing learning rate to 1.1718750101863407e-05.
Epoch 112/200
191/191 [==============================] - 0s 99us/step - loss: 0.4077 - accuracy: 0.7906 - val_loss: 0.6304 - val_accuracy: 0.6875
Epoch 113/200
191/191 [==============================] - 0s 94us/step - loss: 0.4077 - accuracy: 0.7906 - val_loss: 0.6304 - val_accuracy: 0.6875
Epoch 114/200
191/191 [==============================] - 0s 89us/step - loss: 0.4077 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 115/200
191/191 [==============================] - 0s 99us/step - loss: 0.4077 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 116/200
191/191 [==============================] - 0s 89us/step - loss: 0.4077 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 117/200
191/191 [==============================] - 0s 94us/step - loss: 0.4077 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 118/200
191/191 [==============================] - 0s 94us/step - loss: 0.4077 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 119/200
191/191 [==============================] - 0s 115us/step - loss: 0.4077 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 120/200
191/191 [==============================] - 0s 84us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 121/200
191/191 [==============================] - 0s 89us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875

Epoch 00121: ReduceLROnPlateau reducing learning rate to 5.859375050931703e-06.
Epoch 122/200
191/191 [==============================] - 0s 99us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 123/200
191/191 [==============================] - 0s 89us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 124/200
191/191 [==============================] - 0s 99us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 125/200
191/191 [==============================] - 0s 89us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 126/200
191/191 [==============================] - 0s 89us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 127/200
191/191 [==============================] - 0s 99us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 128/200
191/191 [==============================] - 0s 94us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 129/200
191/191 [==============================] - 0s 89us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 130/200
191/191 [==============================] - 0s 94us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 131/200
191/191 [==============================] - 0s 94us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875

Epoch 00131: ReduceLROnPlateau reducing learning rate to 2.9296875254658516e-06.
Epoch 132/200
191/191 [==============================] - 0s 105us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 133/200
191/191 [==============================] - 0s 94us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 134/200
191/191 [==============================] - 0s 89us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 135/200
191/191 [==============================] - 0s 89us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 136/200
191/191 [==============================] - 0s 89us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 137/200
191/191 [==============================] - 0s 94us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 138/200
191/191 [==============================] - 0s 84us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 139/200
191/191 [==============================] - 0s 84us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 140/200
191/191 [==============================] - 0s 84us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 141/200
191/191 [==============================] - 0s 89us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875

Epoch 00141: ReduceLROnPlateau reducing learning rate to 1.4648437627329258e-06.
Epoch 142/200
191/191 [==============================] - 0s 84us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 143/200
191/191 [==============================] - 0s 89us/step - loss: 0.4076 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 144/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 145/200
191/191 [==============================] - 0s 94us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 146/200
191/191 [==============================] - 0s 84us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 147/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 148/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 149/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 150/200
191/191 [==============================] - 0s 84us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 151/200
191/191 [==============================] - 0s 120us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875

Epoch 00151: ReduceLROnPlateau reducing learning rate to 7.324218813664629e-07.
Epoch 152/200
191/191 [==============================] - 0s 94us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 153/200
191/191 [==============================] - 0s 94us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 154/200
191/191 [==============================] - 0s 94us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 155/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 156/200
191/191 [==============================] - 0s 105us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 157/200
191/191 [==============================] - 0s 110us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 158/200
191/191 [==============================] - 0s 94us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 159/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 160/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 161/200
191/191 [==============================] - 0s 94us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875

Epoch 00161: ReduceLROnPlateau reducing learning rate to 3.6621094068323146e-07.
Epoch 162/200
191/191 [==============================] - 0s 99us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 163/200
191/191 [==============================] - 0s 105us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 164/200
191/191 [==============================] - 0s 94us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6305 - val_accuracy: 0.6875
Epoch 165/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 166/200
191/191 [==============================] - 0s 94us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 167/200
191/191 [==============================] - 0s 99us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 168/200
191/191 [==============================] - 0s 94us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 169/200
191/191 [==============================] - 0s 99us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 170/200
191/191 [==============================] - 0s 94us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 171/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875

Epoch 00171: ReduceLROnPlateau reducing learning rate to 1.8310547034161573e-07.
Epoch 172/200
191/191 [==============================] - 0s 99us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 173/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 174/200
191/191 [==============================] - 0s 84us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 175/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 176/200
191/191 [==============================] - 0s 94us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 177/200
191/191 [==============================] - 0s 115us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 178/200
191/191 [==============================] - 0s 99us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 179/200
191/191 [==============================] - 0s 94us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 180/200
191/191 [==============================] - 0s 110us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 181/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875

Epoch 00181: ReduceLROnPlateau reducing learning rate to 9.155273517080786e-08.
Epoch 182/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 183/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 184/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 185/200
191/191 [==============================] - 0s 94us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 186/200
191/191 [==============================] - 0s 94us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 187/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 188/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 189/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 190/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 191/200
191/191 [==============================] - 0s 99us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875

Epoch 00191: ReduceLROnPlateau reducing learning rate to 4.577636758540393e-08.
Epoch 192/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 193/200
191/191 [==============================] - 0s 84us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 194/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 195/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 196/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 197/200
191/191 [==============================] - 0s 99us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 198/200
191/191 [==============================] - 0s 94us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 199/200
191/191 [==============================] - 0s 94us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
Epoch 200/200
191/191 [==============================] - 0s 89us/step - loss: 0.4075 - accuracy: 0.7906 - val_loss: 0.6306 - val_accuracy: 0.6875
In [287]:
# Plot the training history: accuracy and loss curves, train vs. validation.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# One x value per epoch actually run.
epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('epoch')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('epoch')
plt.legend()
plt.show()
In [288]:
# Final evaluation of the trained network on the held-out test split.
test_loss, test_acc = model.evaluate(X_test, y_test)
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
64/64 [==============================] - 0s 62us/step
test loss: 0.6305528879165649, test accuracy: 0.6875
In [289]:
# Predicted scores for the test set; ROC AUC is threshold-independent,
# so it is computed on the raw scores before binarization.
y_pred = model.predict(X_test)
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
AUC ROC:  0.6107634543178974
In [290]:
# Binarize the predicted scores at the conventional 0.5 threshold, then
# report Cohen's kappa and the confusion matrix.
# Vectorized comparison replaces the per-element map/lambda: calling
# int() on size-1 array elements is deprecated in recent numpy, and the
# array expression is both clearer and faster. np.asarray + ravel
# flattens the (n, 1) output that model.predict returns.
y_pred = (np.asarray(y_pred).ravel() >= 0.5).astype(int)
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
Kappa:  0.013867488443759624
[[42  5]
 [15  2]]

KMeans

In [434]:
X
Out[434]:
chromagramfiles_1 chromagramfiles_2 chromagramfiles_3 chromagramfiles_4 chromagramfiles_5 chromagramfiles_6 chromagramfiles_7 chromagramfiles_8 chromagramfiles_9 chromagramfiles_10 chromagramfiles_11 chromagramfiles_12
0 -0.369691 -0.881824 -0.095656 -0.923999 -0.030645 -0.834931 -1.031650 -0.840942 -0.677716 1.084098 -1.064999 -1.156623
1 -0.175875 -0.403800 -0.657709 -0.201259 1.691433 -0.672783 -0.119944 -0.440080 0.339906 1.084098 0.504608 0.931676
2 0.894452 -0.189794 1.959063 0.169276 -0.403611 -1.036954 1.447615 -0.340767 -0.846170 -0.515065 -0.699878 0.032355
3 0.060782 -0.392075 0.826233 -0.048480 1.789786 -0.552163 0.121028 -0.111355 0.220614 1.084098 0.073241 1.176257
4 -1.116536 -0.923742 -1.238971 -0.919598 0.313068 -1.160111 1.316032 -0.700013 -1.600210 1.084098 -1.072155 1.270095
5 -1.082752 -0.067472 -1.142511 -0.923999 0.389201 -1.061180 2.067035 -0.269819 -1.620482 -0.468180 -0.962315 1.853511
6 -1.202528 -0.776453 -1.135820 -0.773268 -0.409932 -0.832224 -0.631931 -0.119492 1.620288 0.162930 -1.063358 0.001987
7 -1.206944 0.500703 -0.243295 -0.611928 0.262760 -0.855004 0.001441 -1.579225 0.245997 1.084098 -1.197067 -0.681282
8 -0.982006 0.421831 0.984997 -0.550391 0.215104 -1.100712 2.248321 -0.336428 1.282109 1.084098 -0.061910 0.480931
9 1.703175 1.029234 1.508859 0.964653 2.164165 0.482073 -0.794175 0.729102 0.721867 0.439251 0.198086 1.778147
10 1.528139 0.898498 0.923889 0.634045 2.016059 0.674138 -0.430188 0.558129 1.200855 0.618822 0.291110 1.853511
11 0.334361 -0.301383 -0.450307 -0.470199 -0.977542 0.863046 -0.396657 -0.882307 0.259614 0.809320 1.750633 0.245556
12 0.597458 0.773201 0.182265 0.104921 0.580017 0.644184 1.433111 1.735353 -0.712181 -0.818426 -0.129551 -0.039236
13 -0.493625 1.341798 -0.632970 2.666081 2.015672 1.537362 2.432631 0.500840 1.337627 1.084098 -0.069418 1.097765
14 0.962230 0.028408 0.059003 -0.233385 1.425585 1.226062 1.160066 1.604723 0.272753 1.084098 0.709073 1.356660
15 -0.843247 0.160055 1.959063 -0.354971 -0.294051 -0.485118 -0.796417 -0.212355 -0.168152 -0.782723 -0.232169 -0.234956
16 -0.256403 1.322075 1.520118 0.907583 1.032003 0.577931 0.414295 0.551597 0.783033 1.084098 0.088744 1.848078
17 -0.456352 2.142046 1.959063 1.734877 1.472675 0.985568 0.646614 0.230853 0.051480 -1.122573 -0.594948 0.587848
18 -0.637040 -1.030219 -1.165495 -0.606800 2.164165 -1.161299 -0.882343 1.727762 -1.021803 -1.420384 -1.197067 -1.157909
19 -1.204564 -1.022455 -0.683305 -0.923999 -0.935167 -0.988596 -0.688535 1.735353 -0.372860 -1.581696 -1.197067 -0.315002
20 -1.077420 -0.314272 1.036699 -0.850174 -0.071202 -1.104286 0.102157 -0.867378 -0.464391 0.591092 -1.048902 1.853510
21 -0.494728 -1.023105 -0.383945 -0.918858 0.498899 -1.042513 -0.072226 -0.187025 -0.589871 1.084098 -0.986967 0.537859
22 -0.525116 0.187277 1.220635 -0.221678 0.589822 -1.115053 0.421737 0.656325 -0.098846 1.084098 -0.740109 1.853156
23 -0.282675 0.571926 -0.333097 2.860439 1.250860 1.434107 2.006573 0.362041 1.620288 0.896295 -0.274184 1.257508
24 1.898264 1.240876 0.302806 -0.427292 -0.709276 -0.159183 -0.474972 0.273501 -0.789908 -0.085745 0.868592 -0.492577
25 0.449834 -0.315494 -0.187659 -0.426842 -0.875213 0.916315 -0.212134 -0.503325 0.251438 0.499009 1.750633 0.286629
26 -0.745212 -0.457525 -0.261214 1.653228 0.471865 -0.532478 3.048431 -0.344057 0.968569 -1.690268 0.186111 -0.576484
27 -1.146960 2.737908 0.205319 -0.915593 1.224042 -1.080890 2.432039 -1.475399 1.443033 -0.136290 -1.161316 1.353237
28 -0.006670 2.177984 -0.364684 -0.232426 2.164165 -0.501370 0.481222 -0.462802 -0.495909 0.200246 -0.925963 -0.127350
29 -1.198149 1.245381 1.824748 -0.544122 2.094805 -0.986543 1.141795 -1.276929 1.620288 0.772653 -1.183303 -0.633811
... ... ... ... ... ... ... ... ... ... ... ... ...
225 -0.850302 -0.813520 -1.238258 0.389340 -0.820553 -0.147948 -0.867381 -1.187728 1.620288 -0.228859 0.676545 -1.166259
226 1.113887 -0.637363 -0.178882 -0.867049 -0.180374 -0.059762 -1.031280 1.075930 1.620288 -0.831212 -0.536781 -0.852284
227 -0.939216 -1.028651 -1.083821 0.671288 -1.104197 0.395233 -0.935964 -0.545426 -0.734897 -1.634988 1.750633 -0.991450
228 0.323318 -0.538560 1.959063 0.469737 -0.928167 0.315432 -0.838357 0.553622 -0.985928 -0.404420 0.017683 -0.868443
229 0.651351 0.768776 1.651644 0.613579 -0.672599 0.778338 -0.288893 1.735353 -0.698435 -0.803492 0.749774 -0.176628
230 1.208263 0.590562 -0.856200 -0.037362 -0.890995 0.154056 -0.224548 0.060386 1.620288 -0.577590 1.081262 0.137052
231 1.629732 -0.424079 -0.793853 -0.297522 0.876511 -0.502033 -0.540340 1.735353 1.179917 0.331082 -0.186926 0.043014
232 1.097162 0.374225 -0.897801 -0.315126 0.016346 0.367805 -0.397202 0.034201 0.736455 1.084098 1.111692 0.505493
233 1.452404 -0.818664 1.001952 0.094142 -1.157451 -0.673136 -1.031650 -0.681166 -1.578572 -1.040029 1.750633 -1.088779
234 0.687938 0.363684 -0.077785 1.495170 -0.946513 0.162091 -0.875163 -0.486609 -0.657582 -0.728212 1.750633 0.166071
235 -0.511818 -1.019067 1.937312 -0.923999 -1.199069 -1.157422 1.424573 -1.506879 -0.941792 1.084098 -0.598433 1.497327
236 -0.649452 1.110585 0.023607 -0.619494 -0.351503 -0.377758 0.774664 -1.037327 -0.008921 1.084098 -0.300447 -0.799951
237 -0.675917 0.864345 1.959063 -0.702632 0.767520 -0.242236 -0.320118 -0.889868 -0.543499 0.953653 -0.295863 0.184530
238 -0.820946 0.480728 -0.348445 1.706293 -0.634861 -0.548325 -0.658463 -1.446347 -0.595881 -1.346009 1.750633 -0.891882
239 0.335654 -0.570366 0.440736 2.255028 -0.777152 -0.336048 -0.104033 0.504513 -0.304387 -0.899407 1.750633 0.586226
240 0.772849 -0.288034 0.998235 2.707124 0.694491 1.600236 -0.599878 0.863164 1.620288 -0.793363 1.076398 0.313476
241 -0.725775 -0.253169 -1.058923 -0.019515 -0.367824 -0.703472 0.163373 0.093846 1.620288 0.712877 -0.410881 0.776774
242 -0.883133 0.370675 -1.102573 0.232760 -1.047240 -0.968298 0.151912 -0.346068 1.620288 -0.219994 -0.255762 0.629036
243 -0.504299 -0.158035 -0.129250 0.170764 0.127833 -0.424841 0.960604 0.067663 1.620288 0.679838 0.005478 0.547596
244 -0.829496 0.743464 1.959063 0.266679 0.653670 -1.095468 1.900161 1.007911 -0.442746 0.870766 -0.246038 1.728101
245 -1.203285 0.455171 0.496797 -0.873183 0.629642 -1.113864 1.271226 0.542153 -0.636367 1.084098 -1.140705 0.222521
246 -1.122600 -0.442839 1.824660 -0.819762 1.050840 -1.048446 1.937596 0.131208 -0.474964 0.536040 -0.800922 1.853511
247 -1.206944 -0.795159 -1.159900 -0.187089 0.233445 2.452569 0.183293 1.253589 -1.394630 0.753640 -1.083641 -1.163597
248 -0.501382 -1.030219 -0.612979 -0.923999 -0.608020 -0.863086 -0.124132 -0.944272 -1.081271 1.084098 -1.170461 -1.244399
249 -0.704093 -0.985145 1.199508 -0.321552 2.164165 -0.193249 -0.295412 -0.394034 0.032818 -1.672473 -0.724683 -1.067831
250 0.831957 -0.173367 1.636565 1.345345 -0.989257 0.826135 -0.824412 -0.072225 -0.255975 -1.005500 1.750633 -1.067130
251 -0.895156 -1.022380 -0.410545 2.237273 -1.199069 0.687783 -1.006468 -0.694581 -1.311235 -1.270219 1.750633 -1.233006
252 -0.546320 -0.631883 -0.800789 -0.187107 -1.179353 0.464606 -0.905192 -0.113592 0.167123 -0.843254 1.750633 -1.148945
253 -0.591349 -0.947758 -0.915262 -0.579179 0.089961 0.047756 -0.905554 -1.413215 -1.184716 1.084098 0.384684 -1.035788
254 -0.072947 -0.912155 0.150662 -0.246506 -1.198328 -0.779047 -1.031650 0.022522 -1.604883 -1.609572 1.750632 -1.247525

255 rows × 12 columns

In [435]:
# Elbow method: within-cluster sum of squares (k-means inertia) for k = 1..14.
# KMeans.fit returns the fitted estimator, so the fit + inertia read chains
# into a single comprehension.
WSSs = [KMeans(n_clusters=k, random_state=0).fit(X).inertia_ for k in range(1, 15)]
WSSs
Out[435]:
[3060.0,
 2594.1549165385713,
 2309.549481414484,
 2089.610498278143,
 1959.0566777030967,
 1842.9588281368096,
 1736.092921360928,
 1663.5689730025233,
 1609.4951000525748,
 1557.5767570007226,
 1514.8225721032359,
 1447.3960995377222,
 1422.027087832329,
 1389.3032827223215]
In [436]:
# Elbow plot: look for the "knee" where adding clusters stops paying off.
# Title/axis labels added so the figure stands alone; trailing semicolon
# suppresses the noisy [<Line2D ...>] repr.
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)
plt.title('Elbow curve')
plt.xlabel('number of clusters k')
plt.ylabel('within-cluster sum of squares');
Out[436]:
[<matplotlib.lines.Line2D at 0x1b82d902048>]

K = 2 (chosen from the elbow plot above)

In [437]:
# Final clustering with K = 2 (chosen from the elbow plot above);
# fixed seed and 10 centroid initializations for a stable result.
kmeans_ch = KMeans(n_clusters=2, random_state=0, n_init=10)
kmeans_ch.fit(X)
Out[437]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=2, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [438]:
# Cluster assignment (0/1) for each of the 255 rows used in the fit.
kmeans_ch.labels_
Out[438]:
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1,
       1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1,
       1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0,
       1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1,
       1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0,
       0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1])
In [439]:
# NOTE(review): predicting on the same X used for fitting simply
# reproduces kmeans_ch.labels_ shown above — redundant here, but gives a
# named variable for the cells below.
clusters_ch = kmeans_ch.predict(X)
clusters_ch
Out[439]:
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1,
       1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1,
       1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0,
       1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1,
       1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0,
       0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1])
In [440]:
# Attach the cluster assignment and the true label to the feature frame
# for the cluster-vs-label comparison below.
# NOTE(review): this mutates X in place — after this cell X is no longer
# a pure 12-column feature matrix, so re-running earlier cells against it
# would need a fresh X. Copying into a new frame would be safer.
X.loc[:,'Cluster'] = clusters_ch
X.loc[:,'chosen'] = list(y)
In [441]:
X
Out[441]:
chromagramfiles_1 chromagramfiles_2 chromagramfiles_3 chromagramfiles_4 chromagramfiles_5 chromagramfiles_6 chromagramfiles_7 chromagramfiles_8 chromagramfiles_9 chromagramfiles_10 chromagramfiles_11 chromagramfiles_12 Cluster chosen
0 -0.369691 -0.881824 -0.095656 -0.923999 -0.030645 -0.834931 -1.031650 -0.840942 -0.677716 1.084098 -1.064999 -1.156623 0 0
1 -0.175875 -0.403800 -0.657709 -0.201259 1.691433 -0.672783 -0.119944 -0.440080 0.339906 1.084098 0.504608 0.931676 0 0
2 0.894452 -0.189794 1.959063 0.169276 -0.403611 -1.036954 1.447615 -0.340767 -0.846170 -0.515065 -0.699878 0.032355 0 0
3 0.060782 -0.392075 0.826233 -0.048480 1.789786 -0.552163 0.121028 -0.111355 0.220614 1.084098 0.073241 1.176257 0 0
4 -1.116536 -0.923742 -1.238971 -0.919598 0.313068 -1.160111 1.316032 -0.700013 -1.600210 1.084098 -1.072155 1.270095 0 0
5 -1.082752 -0.067472 -1.142511 -0.923999 0.389201 -1.061180 2.067035 -0.269819 -1.620482 -0.468180 -0.962315 1.853511 0 0
6 -1.202528 -0.776453 -1.135820 -0.773268 -0.409932 -0.832224 -0.631931 -0.119492 1.620288 0.162930 -1.063358 0.001987 0 0
7 -1.206944 0.500703 -0.243295 -0.611928 0.262760 -0.855004 0.001441 -1.579225 0.245997 1.084098 -1.197067 -0.681282 0 0
8 -0.982006 0.421831 0.984997 -0.550391 0.215104 -1.100712 2.248321 -0.336428 1.282109 1.084098 -0.061910 0.480931 0 0
9 1.703175 1.029234 1.508859 0.964653 2.164165 0.482073 -0.794175 0.729102 0.721867 0.439251 0.198086 1.778147 0 0
10 1.528139 0.898498 0.923889 0.634045 2.016059 0.674138 -0.430188 0.558129 1.200855 0.618822 0.291110 1.853511 0 0
11 0.334361 -0.301383 -0.450307 -0.470199 -0.977542 0.863046 -0.396657 -0.882307 0.259614 0.809320 1.750633 0.245556 1 0
12 0.597458 0.773201 0.182265 0.104921 0.580017 0.644184 1.433111 1.735353 -0.712181 -0.818426 -0.129551 -0.039236 0 0
13 -0.493625 1.341798 -0.632970 2.666081 2.015672 1.537362 2.432631 0.500840 1.337627 1.084098 -0.069418 1.097765 0 0
14 0.962230 0.028408 0.059003 -0.233385 1.425585 1.226062 1.160066 1.604723 0.272753 1.084098 0.709073 1.356660 0 0
15 -0.843247 0.160055 1.959063 -0.354971 -0.294051 -0.485118 -0.796417 -0.212355 -0.168152 -0.782723 -0.232169 -0.234956 0 0
16 -0.256403 1.322075 1.520118 0.907583 1.032003 0.577931 0.414295 0.551597 0.783033 1.084098 0.088744 1.848078 0 0
17 -0.456352 2.142046 1.959063 1.734877 1.472675 0.985568 0.646614 0.230853 0.051480 -1.122573 -0.594948 0.587848 0 0
18 -0.637040 -1.030219 -1.165495 -0.606800 2.164165 -1.161299 -0.882343 1.727762 -1.021803 -1.420384 -1.197067 -1.157909 0 0
19 -1.204564 -1.022455 -0.683305 -0.923999 -0.935167 -0.988596 -0.688535 1.735353 -0.372860 -1.581696 -1.197067 -0.315002 0 0
20 -1.077420 -0.314272 1.036699 -0.850174 -0.071202 -1.104286 0.102157 -0.867378 -0.464391 0.591092 -1.048902 1.853510 0 0
21 -0.494728 -1.023105 -0.383945 -0.918858 0.498899 -1.042513 -0.072226 -0.187025 -0.589871 1.084098 -0.986967 0.537859 0 0
22 -0.525116 0.187277 1.220635 -0.221678 0.589822 -1.115053 0.421737 0.656325 -0.098846 1.084098 -0.740109 1.853156 0 0
23 -0.282675 0.571926 -0.333097 2.860439 1.250860 1.434107 2.006573 0.362041 1.620288 0.896295 -0.274184 1.257508 0 0
24 1.898264 1.240876 0.302806 -0.427292 -0.709276 -0.159183 -0.474972 0.273501 -0.789908 -0.085745 0.868592 -0.492577 1 0
25 0.449834 -0.315494 -0.187659 -0.426842 -0.875213 0.916315 -0.212134 -0.503325 0.251438 0.499009 1.750633 0.286629 1 0
26 -0.745212 -0.457525 -0.261214 1.653228 0.471865 -0.532478 3.048431 -0.344057 0.968569 -1.690268 0.186111 -0.576484 1 0
27 -1.146960 2.737908 0.205319 -0.915593 1.224042 -1.080890 2.432039 -1.475399 1.443033 -0.136290 -1.161316 1.353237 0 0
28 -0.006670 2.177984 -0.364684 -0.232426 2.164165 -0.501370 0.481222 -0.462802 -0.495909 0.200246 -0.925963 -0.127350 0 0
29 -1.198149 1.245381 1.824748 -0.544122 2.094805 -0.986543 1.141795 -1.276929 1.620288 0.772653 -1.183303 -0.633811 0 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
225 -0.850302 -0.813520 -1.238258 0.389340 -0.820553 -0.147948 -0.867381 -1.187728 1.620288 -0.228859 0.676545 -1.166259 1 1
226 1.113887 -0.637363 -0.178882 -0.867049 -0.180374 -0.059762 -1.031280 1.075930 1.620288 -0.831212 -0.536781 -0.852284 1 1
227 -0.939216 -1.028651 -1.083821 0.671288 -1.104197 0.395233 -0.935964 -0.545426 -0.734897 -1.634988 1.750633 -0.991450 1 1
228 0.323318 -0.538560 1.959063 0.469737 -0.928167 0.315432 -0.838357 0.553622 -0.985928 -0.404420 0.017683 -0.868443 1 1
229 0.651351 0.768776 1.651644 0.613579 -0.672599 0.778338 -0.288893 1.735353 -0.698435 -0.803492 0.749774 -0.176628 1 1
230 1.208263 0.590562 -0.856200 -0.037362 -0.890995 0.154056 -0.224548 0.060386 1.620288 -0.577590 1.081262 0.137052 1 1
231 1.629732 -0.424079 -0.793853 -0.297522 0.876511 -0.502033 -0.540340 1.735353 1.179917 0.331082 -0.186926 0.043014 0 1
232 1.097162 0.374225 -0.897801 -0.315126 0.016346 0.367805 -0.397202 0.034201 0.736455 1.084098 1.111692 0.505493 1 1
233 1.452404 -0.818664 1.001952 0.094142 -1.157451 -0.673136 -1.031650 -0.681166 -1.578572 -1.040029 1.750633 -1.088779 1 1
234 0.687938 0.363684 -0.077785 1.495170 -0.946513 0.162091 -0.875163 -0.486609 -0.657582 -0.728212 1.750633 0.166071 1 1
235 -0.511818 -1.019067 1.937312 -0.923999 -1.199069 -1.157422 1.424573 -1.506879 -0.941792 1.084098 -0.598433 1.497327 0 1
236 -0.649452 1.110585 0.023607 -0.619494 -0.351503 -0.377758 0.774664 -1.037327 -0.008921 1.084098 -0.300447 -0.799951 0 1
237 -0.675917 0.864345 1.959063 -0.702632 0.767520 -0.242236 -0.320118 -0.889868 -0.543499 0.953653 -0.295863 0.184530 0 1
238 -0.820946 0.480728 -0.348445 1.706293 -0.634861 -0.548325 -0.658463 -1.446347 -0.595881 -1.346009 1.750633 -0.891882 1 1
239 0.335654 -0.570366 0.440736 2.255028 -0.777152 -0.336048 -0.104033 0.504513 -0.304387 -0.899407 1.750633 0.586226 1 1
240 0.772849 -0.288034 0.998235 2.707124 0.694491 1.600236 -0.599878 0.863164 1.620288 -0.793363 1.076398 0.313476 1 1
241 -0.725775 -0.253169 -1.058923 -0.019515 -0.367824 -0.703472 0.163373 0.093846 1.620288 0.712877 -0.410881 0.776774 0 1
242 -0.883133 0.370675 -1.102573 0.232760 -1.047240 -0.968298 0.151912 -0.346068 1.620288 -0.219994 -0.255762 0.629036 0 1
243 -0.504299 -0.158035 -0.129250 0.170764 0.127833 -0.424841 0.960604 0.067663 1.620288 0.679838 0.005478 0.547596 0 1
244 -0.829496 0.743464 1.959063 0.266679 0.653670 -1.095468 1.900161 1.007911 -0.442746 0.870766 -0.246038 1.728101 0 1
245 -1.203285 0.455171 0.496797 -0.873183 0.629642 -1.113864 1.271226 0.542153 -0.636367 1.084098 -1.140705 0.222521 0 1
246 -1.122600 -0.442839 1.824660 -0.819762 1.050840 -1.048446 1.937596 0.131208 -0.474964 0.536040 -0.800922 1.853511 0 1
247 -1.206944 -0.795159 -1.159900 -0.187089 0.233445 2.452569 0.183293 1.253589 -1.394630 0.753640 -1.083641 -1.163597 0 1
248 -0.501382 -1.030219 -0.612979 -0.923999 -0.608020 -0.863086 -0.124132 -0.944272 -1.081271 1.084098 -1.170461 -1.244399 0 1
249 -0.704093 -0.985145 1.199508 -0.321552 2.164165 -0.193249 -0.295412 -0.394034 0.032818 -1.672473 -0.724683 -1.067831 0 1
250 0.831957 -0.173367 1.636565 1.345345 -0.989257 0.826135 -0.824412 -0.072225 -0.255975 -1.005500 1.750633 -1.067130 1 1
251 -0.895156 -1.022380 -0.410545 2.237273 -1.199069 0.687783 -1.006468 -0.694581 -1.311235 -1.270219 1.750633 -1.233006 1 1
252 -0.546320 -0.631883 -0.800789 -0.187107 -1.179353 0.464606 -0.905192 -0.113592 0.167123 -0.843254 1.750633 -1.148945 1 1
253 -0.591349 -0.947758 -0.915262 -0.579179 0.089961 0.047756 -0.905554 -1.413215 -1.184716 1.084098 0.384684 -1.035788 0 1
254 -0.072947 -0.912155 0.150662 -0.246506 -1.198328 -0.779047 -1.031650 0.022522 -1.604883 -1.609572 1.750632 -1.247525 1 1

255 rows × 14 columns

In [442]:
# Compare cluster membership against the 'chosen' label as a stacked bar
# chart: one bar per cluster, split by label value.
group_sizes = X.groupby(['chosen', 'Cluster']).size()
stacked = group_sizes.reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df[[0, 1]].plot.bar(stacked=True, figsize=(10, 7))
Out[442]:
<matplotlib.axes._subplots.AxesSubplot at 0x1b82d9496a0>
In [443]:
# Render a level-2 markdown header with the next company's name.
# Fix: `Latex` was imported but never used anywhere in the visible code.
from IPython.display import display, Markdown
display(Markdown('## '+companies[2]))

Gramma

ANN

In [291]:
# Standardized chromagram feature matrix for company index 2
# (presumably 'Gramma', per the header cell just above — TODO confirm).
X = df_n_ps_std_ch[2]
In [292]:
# Binary target: whether each song was 'chosen', for the same company slice.
y = df_n_ps[2]['chosen']
In [293]:
# Hold out a test set. Fix: the original call had no random_state, so the
# split — and every accuracy/kappa/AUC figure downstream — changed on each
# re-run; pin it with the seed already used elsewhere in this notebook.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1234)
In [294]:
# Sanity check: 216 songs * 0.75 default train fraction -> 162 rows, 12 features.
X_train.shape
Out[294]:
(162, 12)
In [295]:
# Base estimator for the grid search; the hidden_layer_sizes given here is
# only a placeholder — GridSearchCV below overrides it via the param grid.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [296]:
# Candidate hyperparameter values for the MLP grid search below.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
# Defined but commented out of the grid below — presumably to keep the
# already 20+-minute search tractable.
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [297]:
import time
start = time.time() # Current time in seconds since Jan 1, 1970 (epoch) — used to time the search.

# Seed NumPy's global RNG so the stochastic MLP fits are repeatable.
np.random.seed(1234)
# Hyperparameter grid; batch_size is deliberately left out (see vectors cell).
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track both Cohen's kappa and accuracy; refit the best model on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): `iid` was deprecated in scikit-learn 0.22 and removed in
# 0.24 — this cell only runs on older versions; confirm the pinned sklearn.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [451]:
# Run the exhaustive search (~26 min per the logged output) and report the
# best configuration's CV accuracy and its mean CV kappa.
grid.fit(X_train, y_train)

print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # Time after model training finished.
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'relu', 'hidden_layer_sizes': (30, 30), 'learning_rate_init': 0.006, 'max_iter': 20}, que permiten obtener un Accuracy de 75.31% y un Kappa del 13.17
Tiempo total: 26.15 minutos
In [298]:
# Hardcode the outcome of the 26-minute grid search above so the notebook
# can be re-run without repeating it (this overwrites grid.best_params_).
grid.best_params_={'activation': 'relu', 'hidden_layer_sizes': (30, 30), 'learning_rate_init': 0.006, 'max_iter': 20}
n0=X_train.shape[1]  # input dimensionality (12 chromagram features)
### hidden_layer_sizes
# Layer widths for the Keras re-implementation: the tuned hidden sizes plus
# a final single-unit output. (Replaces the original index-append loop.)
ns = list(grid.best_params_['hidden_layer_sizes']) + [1]
lr = grid.best_params_['learning_rate_init']   # tuned learning rate
epochs = grid.best_params_['max_iter']         # tuned number of epochs
In [299]:
# Keras input layer sized to the n0 (=12) standardized features.
input_tensor = Input(shape = (n0,))
In [300]:
# Build the hidden stack: each Dense layer (activation chosen by the grid
# search) consumes the most recently produced tensor.
hidden_outputs = [input_tensor]
for layer_size in ns[:-1]:
    hidden_outputs.append(
        Dense(layer_size, activation = grid.best_params_['activation'])(hidden_outputs[-1])
    )

# Single sigmoid unit (ns[-1] == 1) for binary classification.
classification_output = Dense(ns[-1], activation = 'sigmoid')(hidden_outputs[-1])
In [301]:
# Assemble the functional model and snapshot the freshly initialized
# weights so training can be restarted from the same point (re-set below).
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [302]:
# Architecture overview: 12 -> 30 -> 30 -> 1, 1,351 trainable parameters.
model.summary()
Model: "model_17"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_18 (InputLayer)        (None, 12)                0         
_________________________________________________________________
dense_49 (Dense)             (None, 30)                390       
_________________________________________________________________
dense_50 (Dense)             (None, 30)                930       
_________________________________________________________________
dense_51 (Dense)             (None, 1)                 31        
=================================================================
Total params: 1,351
Trainable params: 1,351
Non-trainable params: 0
_________________________________________________________________
In [303]:
# Restore the initial weights (so re-running this cell restarts training
# from the same state), then train with the tuned learning rate and epochs.
model.set_weights(weights)
adam = keras.optimizers.Adam(lr=lr)  # NOTE(review): `lr` is deprecated in newer Keras in favor of `learning_rate` — confirm version.
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Halve the learning rate when validation accuracy plateaus for 10 epochs.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), 
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 162 samples, validate on 54 samples
Epoch 1/20
162/162 [==============================] - 0s 3ms/step - loss: 0.6482 - accuracy: 0.6049 - val_loss: 0.6607 - val_accuracy: 0.6667
Epoch 2/20
162/162 [==============================] - 0s 117us/step - loss: 0.5950 - accuracy: 0.7222 - val_loss: 0.6863 - val_accuracy: 0.6667
Epoch 3/20
162/162 [==============================] - 0s 123us/step - loss: 0.5654 - accuracy: 0.7222 - val_loss: 0.6563 - val_accuracy: 0.6667
Epoch 4/20
162/162 [==============================] - 0s 117us/step - loss: 0.5451 - accuracy: 0.7222 - val_loss: 0.6296 - val_accuracy: 0.6667
Epoch 5/20
162/162 [==============================] - 0s 117us/step - loss: 0.5314 - accuracy: 0.7222 - val_loss: 0.6051 - val_accuracy: 0.6667
Epoch 6/20
162/162 [==============================] - 0s 130us/step - loss: 0.5334 - accuracy: 0.7346 - val_loss: 0.6051 - val_accuracy: 0.6852
Epoch 7/20
162/162 [==============================] - 0s 136us/step - loss: 0.5214 - accuracy: 0.7407 - val_loss: 0.6138 - val_accuracy: 0.6667
Epoch 8/20
162/162 [==============================] - 0s 117us/step - loss: 0.5048 - accuracy: 0.7222 - val_loss: 0.6388 - val_accuracy: 0.6667
Epoch 9/20
162/162 [==============================] - 0s 130us/step - loss: 0.5033 - accuracy: 0.7284 - val_loss: 0.6578 - val_accuracy: 0.6667
Epoch 10/20
162/162 [==============================] - 0s 123us/step - loss: 0.4883 - accuracy: 0.7284 - val_loss: 0.6463 - val_accuracy: 0.6667
Epoch 11/20
162/162 [==============================] - 0s 105us/step - loss: 0.4795 - accuracy: 0.7407 - val_loss: 0.6296 - val_accuracy: 0.6852
Epoch 12/20
162/162 [==============================] - 0s 105us/step - loss: 0.4644 - accuracy: 0.7407 - val_loss: 0.6182 - val_accuracy: 0.6852
Epoch 13/20
162/162 [==============================] - 0s 99us/step - loss: 0.4544 - accuracy: 0.7407 - val_loss: 0.6273 - val_accuracy: 0.6667
Epoch 14/20
162/162 [==============================] - 0s 111us/step - loss: 0.4402 - accuracy: 0.7716 - val_loss: 0.6358 - val_accuracy: 0.7037
Epoch 15/20
162/162 [==============================] - 0s 99us/step - loss: 0.4355 - accuracy: 0.8025 - val_loss: 0.6463 - val_accuracy: 0.6481
Epoch 16/20
162/162 [==============================] - 0s 99us/step - loss: 0.4332 - accuracy: 0.7901 - val_loss: 0.6489 - val_accuracy: 0.6667
Epoch 17/20
162/162 [==============================] - 0s 99us/step - loss: 0.4194 - accuracy: 0.7963 - val_loss: 0.6496 - val_accuracy: 0.6852
Epoch 18/20
162/162 [==============================] - 0s 117us/step - loss: 0.4019 - accuracy: 0.8086 - val_loss: 0.6535 - val_accuracy: 0.6667
Epoch 19/20
162/162 [==============================] - 0s 99us/step - loss: 0.3849 - accuracy: 0.7963 - val_loss: 0.6350 - val_accuracy: 0.6481
Epoch 20/20
162/162 [==============================] - 0s 111us/step - loss: 0.3683 - accuracy: 0.8148 - val_loss: 0.6268 - val_accuracy: 0.6667
In [304]:
# Plot the training/validation accuracy and loss curves.
# Two fixes vs. the original cell:
#  * it rebound the global `epochs` (the tuned epoch COUNT passed to
#    model.fit above) to a range object, so re-running the training cell
#    after this one would crash — a local name avoids the shadowing;
#  * it left a debugging `print(epochs)` in the output.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epoch_range = range(len(acc))

plt.plot(epoch_range, acc, 'bo', label='Training acc')
plt.plot(epoch_range, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.show()

plt.plot(epoch_range, loss, 'bo', label='Training loss')
plt.plot(epoch_range, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
range(0, 20)
In [305]:
# Final held-out evaluation on the 54-sample test set (~0.667 accuracy
# per the logged output below).
test_loss, test_acc = model.evaluate(X_test, y_test)
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
54/54 [==============================] - 0s 111us/step
test loss: 0.6267764259267736, test accuracy: 0.6666666865348816
In [306]:
# Probability predictions on the test set; ROC AUC is computed on the raw
# sigmoid scores, before any thresholding.
y_pred = model.predict(X_test)
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
AUC ROC:  0.7052469135802469
In [307]:
# Binarize the sigmoid scores at 0.5, then report the threshold-dependent
# metrics: Cohen's kappa and the confusion matrix.
y_pred = [int(score >= 0.5) for score in y_pred]
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
Kappa:  0.12903225806451613
[[32  4]
 [14  4]]

KMeans

In [462]:
# Display the standardized feature matrix (216 songs x 12 chromagram bins).
X
Out[462]:
chromagramfiles_1 chromagramfiles_2 chromagramfiles_3 chromagramfiles_4 chromagramfiles_5 chromagramfiles_6 chromagramfiles_7 chromagramfiles_8 chromagramfiles_9 chromagramfiles_10 chromagramfiles_11 chromagramfiles_12
0 2.134448 -0.644769 -0.741425 -0.871269 0.189298 -0.623423 -0.529336 0.357204 -0.325100 0.746582 -0.476439 0.217416
1 -0.027862 1.690489 0.055411 -0.561004 0.495091 -0.306711 0.886175 -0.561042 1.431598 0.158038 0.235945 1.435645
2 -0.377201 2.125071 -0.304151 2.056775 -0.998633 -0.428416 1.821160 -1.229826 -0.732612 -1.686385 -0.297109 0.515440
3 -0.590231 0.143657 -0.601072 1.282246 -0.578812 -0.045810 1.094766 0.365693 1.431598 -0.618841 0.417565 0.135593
4 -0.233782 0.378000 1.366350 0.941326 -0.898589 -0.324641 1.117103 0.022453 1.431598 -0.507270 0.477771 -0.297988
5 -1.227009 0.847541 0.442158 -0.853849 0.281746 -0.856990 1.323879 -0.878380 0.254105 1.353628 -1.133848 -0.771482
6 -0.412709 0.592753 0.609235 -0.136011 0.556074 -0.212081 0.908709 0.462724 1.149973 1.353628 -0.748234 -0.690894
7 -0.724090 0.958854 0.128425 -0.061221 0.393577 -0.556616 0.938013 0.500038 1.431598 0.461706 -0.450195 -0.406088
8 2.134447 -0.911970 1.617930 -0.854742 -0.491688 -0.873878 -0.905227 1.840158 -0.631307 -0.682701 -0.960551 -0.147169
9 2.134447 -0.392687 1.219691 -0.556103 -0.426645 -0.134948 -0.847940 0.441485 -0.677993 0.490526 -0.789786 -0.234072
10 2.134447 -1.196966 -0.393629 -1.145266 -0.441885 -0.686755 -1.088890 -0.281516 -1.424181 0.084569 -1.220743 -1.033826
11 1.555294 -0.387633 1.984770 -0.288986 -0.227389 0.249804 -0.754853 2.092450 -0.616701 -0.159824 -0.411527 -0.481036
12 -0.104131 -1.361797 -0.426957 -1.264951 -0.817715 2.665316 -1.021180 -0.205321 -1.821663 -0.584671 -0.883267 -1.449205
13 -0.010446 -0.441706 0.232007 -0.347890 -0.731516 1.121770 -0.797580 2.092450 -0.205641 -0.471642 1.220384 -0.176572
14 0.338185 2.125071 -1.191115 0.600768 -0.845231 2.050341 1.870414 -1.054000 -1.031042 -1.723459 0.138895 -1.471125
15 0.639533 0.029127 -0.874094 0.222996 -0.545595 2.202264 -0.496728 -0.667599 0.029151 -1.291591 1.713019 -0.713484
16 1.192505 1.901961 -0.906081 -0.118624 -0.698455 1.792230 -0.847558 -0.879541 0.236249 -1.396153 1.713019 -0.946362
17 1.282493 -1.425745 1.152539 -1.289274 0.653371 -1.140708 -0.583855 2.092450 -1.821663 -1.624757 -1.464321 0.825445
18 -0.285085 -0.286095 0.719085 -0.553923 -0.239820 -0.233179 -0.248394 2.092450 -0.347790 0.612253 0.820535 0.965173
19 2.134447 -1.173436 -0.252515 -0.849286 1.426186 -0.931306 -0.744081 0.963581 -1.061327 0.051966 -0.724467 1.649501
20 -0.278722 0.441120 0.539755 0.073408 1.245563 -0.336695 0.870546 0.829705 0.507678 1.353628 0.336778 1.654823
21 -0.611002 -0.704814 -0.748805 -0.482771 0.602398 -0.299716 0.585889 0.316968 0.460384 1.353628 0.031354 0.538523
22 0.042172 0.682883 1.435959 0.203185 0.579082 -0.207847 1.042158 0.812349 0.787282 1.101013 -0.163470 1.833579
23 2.134448 0.023362 1.143286 0.064566 0.079445 1.697608 0.134204 1.514260 -0.602064 0.050733 1.157687 0.140809
24 0.973235 -0.202714 0.576270 -0.222220 -0.676313 0.245482 -0.104773 2.092451 0.270294 0.386750 0.176120 0.589755
25 2.134447 -0.628020 0.565132 -0.080497 0.062269 1.923406 -0.349591 1.577616 -0.738948 -0.236942 0.974727 -0.387827
26 -0.475449 -0.272759 -0.483306 2.568625 -0.432218 0.188443 -0.613425 -0.934384 -1.078606 -1.527243 1.713017 -1.230804
27 -1.274193 -1.259433 -0.866787 0.542028 -0.775059 -0.486970 -0.840968 1.043082 1.431597 -0.491858 -1.133072 -1.447443
28 -0.649286 2.125072 -0.798596 0.055357 1.448693 -0.258788 1.830622 -0.960651 0.580136 0.182651 -0.817420 1.057098
29 0.526348 0.918778 -1.174363 0.653683 -1.116289 0.446306 -0.968896 -0.898607 0.359556 -1.164241 1.713019 -1.326477
... ... ... ... ... ... ... ... ... ... ... ... ...
186 -1.319423 0.175014 -1.200954 -1.285944 -0.257920 -1.092671 -0.865905 -1.304258 1.431598 0.180121 -1.464321 -1.376110
187 -0.108219 2.125071 -0.213805 -0.324535 1.060404 -0.410798 2.017305 -0.960634 -0.126219 0.768136 -0.289177 0.648942
188 0.564307 -0.555458 -0.658358 0.591215 -0.731287 -0.224320 -0.776096 0.187651 1.431598 -0.214351 0.838382 -0.562694
189 0.641104 -0.440581 -0.583337 0.310188 -0.632755 -0.132319 -0.662616 -0.065449 1.431598 -0.606868 1.075355 -0.564249
190 0.573210 -0.763939 -0.684625 1.454301 -0.728330 0.803913 -0.748853 0.725106 1.431598 -0.372785 1.641772 -0.524504
191 0.308827 -0.760763 0.972410 -0.682585 1.394392 -0.667061 0.263256 2.092451 -1.002578 0.003476 -0.841976 0.597412
192 2.058242 -1.400292 0.536272 -1.288314 0.742803 -1.140708 -0.707389 2.092451 -1.470650 0.803010 -0.888001 1.611969
193 1.113210 -0.303602 1.445305 -0.282286 1.802868 -0.661326 -0.240367 2.092451 -0.790224 0.079975 -0.437082 1.116475
194 -0.450762 -0.010607 2.004804 -0.654718 0.376854 -0.729410 -0.771690 -0.278808 -0.906778 1.353628 -0.459874 1.481964
195 -0.185698 0.215292 1.601736 -0.053667 0.589974 -0.573847 0.000101 0.433513 0.029899 1.101702 -0.046786 1.833579
196 -0.759927 -0.321732 1.394992 -0.641057 0.457596 -0.862435 -0.236311 0.169558 -0.637600 0.890452 -0.491654 1.833579
197 2.134447 -0.466820 0.555283 -0.588683 -0.744840 0.462895 -0.666257 0.482783 -0.396687 0.433259 -0.525795 -0.479641
198 1.112509 -0.193649 0.414393 0.029650 -0.429733 0.928074 -0.345182 1.218287 0.542487 1.353628 0.718168 0.277470
199 0.905453 -0.524964 1.086044 -0.043938 -0.565400 0.694749 -0.521263 1.416977 -0.106187 1.353628 -0.300623 -0.411341
200 -0.899647 0.791525 -0.677180 1.189597 -0.058699 -0.824832 -0.542755 -0.582878 1.431598 -0.375258 -0.485654 -0.124219
201 -0.733318 2.125071 -0.643560 0.518336 0.700518 -0.615231 0.605320 -0.834916 0.796712 0.392851 -0.427106 -0.084736
202 -1.324292 0.274775 -1.200954 0.489476 2.489246 -1.088788 2.630785 -1.304258 -0.653881 -1.056812 -1.464109 -0.646684
203 -0.765770 -0.634466 -0.695396 -0.104253 1.440461 -0.292064 -0.160953 -0.262483 -0.585612 1.353628 -0.534450 0.252552
204 -1.298649 -0.865495 -1.182382 -1.154626 -0.789274 -1.140708 0.189079 -1.172061 -0.779701 0.811604 -1.201474 1.833579
205 -0.432584 0.228569 0.159869 -0.537903 1.169801 -0.537451 1.083646 0.017223 -0.308391 1.353628 -0.612028 0.693463
206 -0.098153 1.506263 0.399753 -0.402570 0.366102 0.205628 1.813053 0.465722 1.228385 1.353628 0.351359 0.987155
207 -0.684201 1.261357 0.827726 -0.398593 1.376622 -0.085790 0.419996 -0.263408 0.575117 1.353628 -0.215556 0.466853
208 0.057892 1.228574 0.545400 -0.211451 2.273548 0.295789 2.675786 -0.458831 -0.436474 0.159917 -0.736888 0.615728
209 1.529114 -1.141477 -0.952529 -0.312120 -1.111930 -0.845550 -0.966860 0.531829 1.431598 -0.891915 0.316064 -1.051750
210 0.792164 -1.425745 -1.171065 2.751983 -1.223875 0.047688 -1.104958 1.841305 0.432920 -1.723459 -0.123688 -1.471125
211 1.033808 -1.411737 -0.414286 0.062690 -1.139737 -0.864229 -1.024376 0.619133 1.431598 -1.515595 -0.325421 -1.435723
212 0.856046 -0.404521 -0.808726 0.490430 -0.734186 0.939368 -0.730764 -0.618840 1.257115 -0.514941 1.713019 -0.664148
213 1.074157 -0.156701 -0.547902 1.101943 -0.759892 0.974616 -0.748565 -0.668486 1.431598 0.078809 1.447463 -0.508043
214 -0.466039 1.901913 -0.716582 -0.377357 1.260897 0.205794 0.988225 -0.034946 -0.146391 1.353628 -0.154870 0.032420
215 -1.281934 -1.409413 1.316491 -1.289274 -1.223875 2.665316 -1.067543 1.859460 -1.821663 1.125144 0.068239 -1.471125

216 rows × 12 columns

In [463]:
# Elbow method: within-cluster sum of squares (KMeans inertia) for k = 1..14.
# KMeans.fit returns the fitted estimator, so the loop collapses to a
# comprehension.
WSSs = [
    KMeans(n_clusters=k, random_state=0).fit(X).inertia_
    for k in range(1, 15)
]
WSSs
Out[463]:
[2592.0,
 2036.504626486333,
 1690.4433364167144,
 1572.8425668108105,
 1474.2750884480324,
 1381.4716826450067,
 1300.6860863710224,
 1239.060407813203,
 1202.9625977479168,
 1129.4403597805695,
 1086.8575546528705,
 1056.8830460036334,
 1009.1121548696354,
 981.6337584386087]
In [464]:
# Elbow plot for choosing k. Improvement: the original figure had no title
# or axis labels, so it could not be read on its own when skimming.
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)
plt.title('Elbow method')
plt.xlabel('k (number of clusters)')
plt.ylabel('Within-cluster sum of squares (inertia)')
Out[464]:
[<matplotlib.lines.Line2D at 0x1b82f456f60>]

K=2

In [465]:
# Final clustering with k=2, chosen from the elbow plot above.
kmeans_ch = KMeans(n_clusters=2, random_state=0, n_init=10)
kmeans_ch.fit(X)
Out[465]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=2, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [466]:
# Cluster assignment (0/1) for each of the 216 songs, in row order.
kmeans_ch.labels_
Out[466]:
array([1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
       1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1,
       1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1,
       1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1,
       0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
       1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0,
       0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1])
In [467]:
# predict() on the training data returns the same assignments as
# kmeans_ch.labels_ (compare Out[466]/Out[467]); kept here for the
# variable name used by the cells below.
clusters_ch = kmeans_ch.predict(X)
clusters_ch
Out[467]:
array([1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
       1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1,
       1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1,
       1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1,
       0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
       1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0,
       0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1])
In [468]:
# Attach the cluster id and the true label to the feature frame for the
# grouped bar chart below. Fix: the original wrote the columns in place
# with X.loc[...], mutating the frame stored in df_n_ps_std_ch (so a later
# re-run of the ANN cells would train on the 'Cluster'/'chosen' columns —
# label leakage) and risking a SettingWithCopyWarning. `.assign` builds an
# annotated copy instead. `list(y)` keeps the original positional (not
# index-aligned) semantics of the y values.
X = X.assign(Cluster=clusters_ch, chosen=list(y))
In [469]:
# Show the annotated frame (now 14 columns: 12 features + Cluster + chosen).
X
Out[469]:
chromagramfiles_1 chromagramfiles_2 chromagramfiles_3 chromagramfiles_4 chromagramfiles_5 chromagramfiles_6 chromagramfiles_7 chromagramfiles_8 chromagramfiles_9 chromagramfiles_10 chromagramfiles_11 chromagramfiles_12 Cluster chosen
0 2.134448 -0.644769 -0.741425 -0.871269 0.189298 -0.623423 -0.529336 0.357204 -0.325100 0.746582 -0.476439 0.217416 1 0
1 -0.027862 1.690489 0.055411 -0.561004 0.495091 -0.306711 0.886175 -0.561042 1.431598 0.158038 0.235945 1.435645 1 0
2 -0.377201 2.125071 -0.304151 2.056775 -0.998633 -0.428416 1.821160 -1.229826 -0.732612 -1.686385 -0.297109 0.515440 0 0
3 -0.590231 0.143657 -0.601072 1.282246 -0.578812 -0.045810 1.094766 0.365693 1.431598 -0.618841 0.417565 0.135593 0 0
4 -0.233782 0.378000 1.366350 0.941326 -0.898589 -0.324641 1.117103 0.022453 1.431598 -0.507270 0.477771 -0.297988 0 0
5 -1.227009 0.847541 0.442158 -0.853849 0.281746 -0.856990 1.323879 -0.878380 0.254105 1.353628 -1.133848 -0.771482 1 0
6 -0.412709 0.592753 0.609235 -0.136011 0.556074 -0.212081 0.908709 0.462724 1.149973 1.353628 -0.748234 -0.690894 1 0
7 -0.724090 0.958854 0.128425 -0.061221 0.393577 -0.556616 0.938013 0.500038 1.431598 0.461706 -0.450195 -0.406088 1 0
8 2.134447 -0.911970 1.617930 -0.854742 -0.491688 -0.873878 -0.905227 1.840158 -0.631307 -0.682701 -0.960551 -0.147169 1 0
9 2.134447 -0.392687 1.219691 -0.556103 -0.426645 -0.134948 -0.847940 0.441485 -0.677993 0.490526 -0.789786 -0.234072 1 0
10 2.134447 -1.196966 -0.393629 -1.145266 -0.441885 -0.686755 -1.088890 -0.281516 -1.424181 0.084569 -1.220743 -1.033826 1 0
11 1.555294 -0.387633 1.984770 -0.288986 -0.227389 0.249804 -0.754853 2.092450 -0.616701 -0.159824 -0.411527 -0.481036 1 0
12 -0.104131 -1.361797 -0.426957 -1.264951 -0.817715 2.665316 -1.021180 -0.205321 -1.821663 -0.584671 -0.883267 -1.449205 0 0
13 -0.010446 -0.441706 0.232007 -0.347890 -0.731516 1.121770 -0.797580 2.092450 -0.205641 -0.471642 1.220384 -0.176572 0 0
14 0.338185 2.125071 -1.191115 0.600768 -0.845231 2.050341 1.870414 -1.054000 -1.031042 -1.723459 0.138895 -1.471125 0 0
15 0.639533 0.029127 -0.874094 0.222996 -0.545595 2.202264 -0.496728 -0.667599 0.029151 -1.291591 1.713019 -0.713484 0 0
16 1.192505 1.901961 -0.906081 -0.118624 -0.698455 1.792230 -0.847558 -0.879541 0.236249 -1.396153 1.713019 -0.946362 0 0
17 1.282493 -1.425745 1.152539 -1.289274 0.653371 -1.140708 -0.583855 2.092450 -1.821663 -1.624757 -1.464321 0.825445 1 0
18 -0.285085 -0.286095 0.719085 -0.553923 -0.239820 -0.233179 -0.248394 2.092450 -0.347790 0.612253 0.820535 0.965173 1 0
19 2.134447 -1.173436 -0.252515 -0.849286 1.426186 -0.931306 -0.744081 0.963581 -1.061327 0.051966 -0.724467 1.649501 1 0
20 -0.278722 0.441120 0.539755 0.073408 1.245563 -0.336695 0.870546 0.829705 0.507678 1.353628 0.336778 1.654823 1 0
21 -0.611002 -0.704814 -0.748805 -0.482771 0.602398 -0.299716 0.585889 0.316968 0.460384 1.353628 0.031354 0.538523 1 0
22 0.042172 0.682883 1.435959 0.203185 0.579082 -0.207847 1.042158 0.812349 0.787282 1.101013 -0.163470 1.833579 1 0
23 2.134448 0.023362 1.143286 0.064566 0.079445 1.697608 0.134204 1.514260 -0.602064 0.050733 1.157687 0.140809 0 0
24 0.973235 -0.202714 0.576270 -0.222220 -0.676313 0.245482 -0.104773 2.092451 0.270294 0.386750 0.176120 0.589755 1 0
25 2.134447 -0.628020 0.565132 -0.080497 0.062269 1.923406 -0.349591 1.577616 -0.738948 -0.236942 0.974727 -0.387827 0 0
26 -0.475449 -0.272759 -0.483306 2.568625 -0.432218 0.188443 -0.613425 -0.934384 -1.078606 -1.527243 1.713017 -1.230804 0 0
27 -1.274193 -1.259433 -0.866787 0.542028 -0.775059 -0.486970 -0.840968 1.043082 1.431597 -0.491858 -1.133072 -1.447443 0 0
28 -0.649286 2.125072 -0.798596 0.055357 1.448693 -0.258788 1.830622 -0.960651 0.580136 0.182651 -0.817420 1.057098 1 0
29 0.526348 0.918778 -1.174363 0.653683 -1.116289 0.446306 -0.968896 -0.898607 0.359556 -1.164241 1.713019 -1.326477 0 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
186 -1.319423 0.175014 -1.200954 -1.285944 -0.257920 -1.092671 -0.865905 -1.304258 1.431598 0.180121 -1.464321 -1.376110 1 1
187 -0.108219 2.125071 -0.213805 -0.324535 1.060404 -0.410798 2.017305 -0.960634 -0.126219 0.768136 -0.289177 0.648942 1 1
188 0.564307 -0.555458 -0.658358 0.591215 -0.731287 -0.224320 -0.776096 0.187651 1.431598 -0.214351 0.838382 -0.562694 0 1
189 0.641104 -0.440581 -0.583337 0.310188 -0.632755 -0.132319 -0.662616 -0.065449 1.431598 -0.606868 1.075355 -0.564249 0 1
190 0.573210 -0.763939 -0.684625 1.454301 -0.728330 0.803913 -0.748853 0.725106 1.431598 -0.372785 1.641772 -0.524504 0 1
191 0.308827 -0.760763 0.972410 -0.682585 1.394392 -0.667061 0.263256 2.092451 -1.002578 0.003476 -0.841976 0.597412 1 1
192 2.058242 -1.400292 0.536272 -1.288314 0.742803 -1.140708 -0.707389 2.092451 -1.470650 0.803010 -0.888001 1.611969 1 1
193 1.113210 -0.303602 1.445305 -0.282286 1.802868 -0.661326 -0.240367 2.092451 -0.790224 0.079975 -0.437082 1.116475 1 1
194 -0.450762 -0.010607 2.004804 -0.654718 0.376854 -0.729410 -0.771690 -0.278808 -0.906778 1.353628 -0.459874 1.481964 1 1
195 -0.185698 0.215292 1.601736 -0.053667 0.589974 -0.573847 0.000101 0.433513 0.029899 1.101702 -0.046786 1.833579 1 1
196 -0.759927 -0.321732 1.394992 -0.641057 0.457596 -0.862435 -0.236311 0.169558 -0.637600 0.890452 -0.491654 1.833579 1 1
197 2.134447 -0.466820 0.555283 -0.588683 -0.744840 0.462895 -0.666257 0.482783 -0.396687 0.433259 -0.525795 -0.479641 1 1
198 1.112509 -0.193649 0.414393 0.029650 -0.429733 0.928074 -0.345182 1.218287 0.542487 1.353628 0.718168 0.277470 1 1
199 0.905453 -0.524964 1.086044 -0.043938 -0.565400 0.694749 -0.521263 1.416977 -0.106187 1.353628 -0.300623 -0.411341 1 1
200 -0.899647 0.791525 -0.677180 1.189597 -0.058699 -0.824832 -0.542755 -0.582878 1.431598 -0.375258 -0.485654 -0.124219 0 1
201 -0.733318 2.125071 -0.643560 0.518336 0.700518 -0.615231 0.605320 -0.834916 0.796712 0.392851 -0.427106 -0.084736 1 1
202 -1.324292 0.274775 -1.200954 0.489476 2.489246 -1.088788 2.630785 -1.304258 -0.653881 -1.056812 -1.464109 -0.646684 1 1
203 -0.765770 -0.634466 -0.695396 -0.104253 1.440461 -0.292064 -0.160953 -0.262483 -0.585612 1.353628 -0.534450 0.252552 1 1
204 -1.298649 -0.865495 -1.182382 -1.154626 -0.789274 -1.140708 0.189079 -1.172061 -0.779701 0.811604 -1.201474 1.833579 1 1
205 -0.432584 0.228569 0.159869 -0.537903 1.169801 -0.537451 1.083646 0.017223 -0.308391 1.353628 -0.612028 0.693463 1 1
206 -0.098153 1.506263 0.399753 -0.402570 0.366102 0.205628 1.813053 0.465722 1.228385 1.353628 0.351359 0.987155 1 1
207 -0.684201 1.261357 0.827726 -0.398593 1.376622 -0.085790 0.419996 -0.263408 0.575117 1.353628 -0.215556 0.466853 1 1
208 0.057892 1.228574 0.545400 -0.211451 2.273548 0.295789 2.675786 -0.458831 -0.436474 0.159917 -0.736888 0.615728 1 1
209 1.529114 -1.141477 -0.952529 -0.312120 -1.111930 -0.845550 -0.966860 0.531829 1.431598 -0.891915 0.316064 -1.051750 0 1
210 0.792164 -1.425745 -1.171065 2.751983 -1.223875 0.047688 -1.104958 1.841305 0.432920 -1.723459 -0.123688 -1.471125 0 1
211 1.033808 -1.411737 -0.414286 0.062690 -1.139737 -0.864229 -1.024376 0.619133 1.431598 -1.515595 -0.325421 -1.435723 0 1
212 0.856046 -0.404521 -0.808726 0.490430 -0.734186 0.939368 -0.730764 -0.618840 1.257115 -0.514941 1.713019 -0.664148 0 1
213 1.074157 -0.156701 -0.547902 1.101943 -0.759892 0.974616 -0.748565 -0.668486 1.431598 0.078809 1.447463 -0.508043 0 1
214 -0.466039 1.901913 -0.716582 -0.377357 1.260897 0.205794 0.988225 -0.034946 -0.146391 1.353628 -0.154870 0.032420 1 1
215 -1.281934 -1.409413 1.316491 -1.289274 -1.223875 2.665316 -1.067543 1.859460 -1.821663 1.125144 0.068239 -1.471125 1 1

216 rows × 14 columns

In [470]:
# Count songs per (chosen, cluster) pair and draw a stacked bar chart:
# one bar per cluster, split by the binary `chosen` label.
group_sizes = X.groupby(['chosen','Cluster']).size()
stacked = group_sizes.reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(10,7))
Out[470]:
<matplotlib.axes._subplots.AxesSubplot at 0x1b8299f4fd0>
In [471]:
# Render a level-2 markdown header with the next company's name.
# Fix: `Latex` was imported but never used anywhere in the visible code.
from IPython.display import display, Markdown
display(Markdown('## '+companies[3]))

Hotel Marrakech

ANN

In [308]:
# Standardized chromagram feature matrix for company index 3
# (presumably 'Hotel Marrakech', per the header above — TODO confirm).
X = df_n_ps_std_ch[3]
In [309]:
# Binary 'chosen' target for company index 3.
y = df_n_ps[3]['chosen']
In [310]:
# Hold out a test set. Fix: the original call had no random_state, so the
# split — and every downstream metric — changed on each re-run; pin it
# with the seed already used elsewhere in this notebook.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1234)
In [311]:
# Sanity check: 108 training rows, 12 features (36 rows held out for test).
X_train.shape
Out[311]:
(108, 12)
In [312]:
# Base estimator for the grid search; the hidden_layer_sizes given here is
# only a placeholder — GridSearchCV below overrides it via the param grid.
# NOTE(review): this whole ANN pipeline is copy-pasted per company —
# consider factoring it into a function parameterized by company index.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [313]:
# Candidate hyperparameter values for the MLP grid search below.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
# Defined but commented out of the grid below — presumably to keep the
# already 20+-minute search tractable.
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [314]:
import time
start = time.time() # Current time in seconds since Jan 1, 1970 (epoch) — used to time the search.

# Seed NumPy's global RNG so the stochastic MLP fits are repeatable.
np.random.seed(1234)
# Hyperparameter grid; batch_size is deliberately left out (see vectors cell).
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track both Cohen's kappa and accuracy; refit the best model on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): `iid` was deprecated in scikit-learn 0.22 and removed in
# 0.24 — this cell only runs on older versions; confirm the pinned sklearn.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [479]:
# Run the exhaustive search (~21 min per the logged output) and report the
# best configuration's CV accuracy and its mean CV kappa.
grid.fit(X_train, y_train)

print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # Time after model training finished.
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'tanh', 'hidden_layer_sizes': (30,), 'learning_rate_init': 0.001, 'max_iter': 1000}, que permiten obtener un Accuracy de 74.07% y un Kappa del 46.20
Tiempo total: 21.22 minutos
In [315]:
# Hardcode the outcome of the 21-minute grid search above so the notebook
# can be re-run without repeating it (this overwrites grid.best_params_).
grid.best_params_={'activation': 'tanh', 'hidden_layer_sizes': (30,), 'learning_rate_init': 0.001, 'max_iter': 1000}
n0=X_train.shape[1]  # input dimensionality (12 chromagram features)
### hidden_layer_sizes
# Layer widths for the Keras re-implementation: the tuned hidden sizes plus
# a final single-unit output. (Replaces the original index-append loop.)
ns = list(grid.best_params_['hidden_layer_sizes']) + [1]
lr = grid.best_params_['learning_rate_init']   # tuned learning rate
epochs = grid.best_params_['max_iter']         # tuned number of epochs
In [316]:
# Keras input layer sized to the n0 (=12) standardized features.
input_tensor = Input(shape = (n0,))
In [317]:
# Build the hidden stack: each Dense layer (activation chosen by the grid
# search) consumes the most recently produced tensor.
hidden_outputs = [input_tensor]
for layer_size in ns[:-1]:
    hidden_outputs.append(
        Dense(layer_size, activation = grid.best_params_['activation'])(hidden_outputs[-1])
    )

# Single sigmoid unit (ns[-1] == 1) for binary classification.
classification_output = Dense(ns[-1], activation = 'sigmoid')(hidden_outputs[-1])
In [318]:
# Assemble the functional model and snapshot the freshly initialized
# weights so training can be restarted from the same point (re-set below).
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [319]:
# Architecture overview: 12 -> 30 -> 1, 421 trainable parameters.
model.summary()
Model: "model_18"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_19 (InputLayer)        (None, 12)                0         
_________________________________________________________________
dense_52 (Dense)             (None, 30)                390       
_________________________________________________________________
dense_53 (Dense)             (None, 1)                 31        
=================================================================
Total params: 421
Trainable params: 421
Non-trainable params: 0
_________________________________________________________________
In [320]:
# Restore the initial weights (so re-running this cell restarts training
# from the same state), then train with the tuned learning rate and epochs.
model.set_weights(weights)
adam = keras.optimizers.Adam(lr=lr)  # NOTE(review): `lr` is deprecated in newer Keras in favor of `learning_rate` — confirm version.
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Halve the learning rate when validation accuracy plateaus for 10 epochs.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test),
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 108 samples, validate on 36 samples
Epoch 1/1000
108/108 [==============================] - 0s 1ms/step - loss: 0.8036 - accuracy: 0.5278 - val_loss: 0.8239 - val_accuracy: 0.5000
Epoch 2/1000
108/108 [==============================] - 0s 93us/step - loss: 0.7836 - accuracy: 0.5370 - val_loss: 0.8023 - val_accuracy: 0.5000
Epoch 3/1000
108/108 [==============================] - 0s 83us/step - loss: 0.7693 - accuracy: 0.5556 - val_loss: 0.7850 - val_accuracy: 0.5278
Epoch 4/1000
108/108 [==============================] - 0s 93us/step - loss: 0.7543 - accuracy: 0.5648 - val_loss: 0.7700 - val_accuracy: 0.5556
Epoch 5/1000
108/108 [==============================] - 0s 102us/step - loss: 0.7421 - accuracy: 0.5926 - val_loss: 0.7555 - val_accuracy: 0.5278
Epoch 6/1000
108/108 [==============================] - 0s 93us/step - loss: 0.7308 - accuracy: 0.6019 - val_loss: 0.7443 - val_accuracy: 0.5556
Epoch 7/1000
108/108 [==============================] - 0s 83us/step - loss: 0.7202 - accuracy: 0.5833 - val_loss: 0.7343 - val_accuracy: 0.5833
Epoch 8/1000
108/108 [==============================] - 0s 93us/step - loss: 0.7117 - accuracy: 0.5926 - val_loss: 0.7238 - val_accuracy: 0.5833
Epoch 9/1000
108/108 [==============================] - 0s 93us/step - loss: 0.7036 - accuracy: 0.5926 - val_loss: 0.7153 - val_accuracy: 0.6111
Epoch 10/1000
108/108 [==============================] - 0s 74us/step - loss: 0.6970 - accuracy: 0.5741 - val_loss: 0.7072 - val_accuracy: 0.6111
Epoch 11/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6909 - accuracy: 0.5648 - val_loss: 0.7013 - val_accuracy: 0.6111
Epoch 12/1000
108/108 [==============================] - 0s 93us/step - loss: 0.6852 - accuracy: 0.5741 - val_loss: 0.6959 - val_accuracy: 0.6111
Epoch 13/1000
108/108 [==============================] - 0s 74us/step - loss: 0.6810 - accuracy: 0.5741 - val_loss: 0.6913 - val_accuracy: 0.6111
Epoch 14/1000
108/108 [==============================] - 0s 93us/step - loss: 0.6764 - accuracy: 0.5833 - val_loss: 0.6882 - val_accuracy: 0.6111
Epoch 15/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6729 - accuracy: 0.6019 - val_loss: 0.6847 - val_accuracy: 0.6389
Epoch 16/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6694 - accuracy: 0.5926 - val_loss: 0.6827 - val_accuracy: 0.6389
Epoch 17/1000
108/108 [==============================] - 0s 93us/step - loss: 0.6663 - accuracy: 0.5741 - val_loss: 0.6807 - val_accuracy: 0.6389
Epoch 18/1000
108/108 [==============================] - 0s 74us/step - loss: 0.6638 - accuracy: 0.5741 - val_loss: 0.6792 - val_accuracy: 0.6667
Epoch 19/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6612 - accuracy: 0.5833 - val_loss: 0.6784 - val_accuracy: 0.6667
Epoch 20/1000
108/108 [==============================] - 0s 93us/step - loss: 0.6584 - accuracy: 0.5648 - val_loss: 0.6764 - val_accuracy: 0.6667
Epoch 21/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6561 - accuracy: 0.5648 - val_loss: 0.6752 - val_accuracy: 0.6667
Epoch 22/1000
108/108 [==============================] - 0s 83us/step - loss: 0.6545 - accuracy: 0.5741 - val_loss: 0.6738 - val_accuracy: 0.6667
Epoch 23/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6520 - accuracy: 0.5741 - val_loss: 0.6722 - val_accuracy: 0.6944
Epoch 24/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6504 - accuracy: 0.5833 - val_loss: 0.6711 - val_accuracy: 0.6667
Epoch 25/1000
108/108 [==============================] - 0s 83us/step - loss: 0.6490 - accuracy: 0.5741 - val_loss: 0.6704 - val_accuracy: 0.6667
Epoch 26/1000
108/108 [==============================] - 0s 93us/step - loss: 0.6466 - accuracy: 0.5833 - val_loss: 0.6701 - val_accuracy: 0.6667
Epoch 27/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6449 - accuracy: 0.5833 - val_loss: 0.6694 - val_accuracy: 0.6667
Epoch 28/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6431 - accuracy: 0.5741 - val_loss: 0.6670 - val_accuracy: 0.6667
Epoch 29/1000
108/108 [==============================] - 0s 74us/step - loss: 0.6416 - accuracy: 0.5648 - val_loss: 0.6651 - val_accuracy: 0.6667
Epoch 30/1000
108/108 [==============================] - 0s 93us/step - loss: 0.6401 - accuracy: 0.5741 - val_loss: 0.6630 - val_accuracy: 0.6667
Epoch 31/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6382 - accuracy: 0.5648 - val_loss: 0.6618 - val_accuracy: 0.6667
Epoch 32/1000
108/108 [==============================] - 0s 93us/step - loss: 0.6367 - accuracy: 0.5833 - val_loss: 0.6612 - val_accuracy: 0.6667
Epoch 33/1000
108/108 [==============================] - 0s 83us/step - loss: 0.6353 - accuracy: 0.6019 - val_loss: 0.6607 - val_accuracy: 0.6667

Epoch 00033: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.
Epoch 34/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6343 - accuracy: 0.6019 - val_loss: 0.6601 - val_accuracy: 0.6667
Epoch 35/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6337 - accuracy: 0.6019 - val_loss: 0.6598 - val_accuracy: 0.6389
Epoch 36/1000
108/108 [==============================] - 0s 93us/step - loss: 0.6330 - accuracy: 0.6019 - val_loss: 0.6598 - val_accuracy: 0.6389
Epoch 37/1000
108/108 [==============================] - 0s 83us/step - loss: 0.6324 - accuracy: 0.6019 - val_loss: 0.6591 - val_accuracy: 0.6389
Epoch 38/1000
108/108 [==============================] - 0s 83us/step - loss: 0.6317 - accuracy: 0.6111 - val_loss: 0.6580 - val_accuracy: 0.6111
Epoch 39/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6312 - accuracy: 0.6111 - val_loss: 0.6570 - val_accuracy: 0.6111
Epoch 40/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6307 - accuracy: 0.6111 - val_loss: 0.6563 - val_accuracy: 0.6111
Epoch 41/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6301 - accuracy: 0.6111 - val_loss: 0.6557 - val_accuracy: 0.6111
Epoch 42/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6293 - accuracy: 0.6111 - val_loss: 0.6551 - val_accuracy: 0.6111
Epoch 43/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6288 - accuracy: 0.6111 - val_loss: 0.6548 - val_accuracy: 0.6111

Epoch 00043: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
Epoch 44/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6283 - accuracy: 0.6111 - val_loss: 0.6545 - val_accuracy: 0.6111
Epoch 45/1000
108/108 [==============================] - 0s 83us/step - loss: 0.6280 - accuracy: 0.6111 - val_loss: 0.6543 - val_accuracy: 0.6111
Epoch 46/1000
108/108 [==============================] - ETA: 0s - loss: 0.5806 - accuracy: 0.68 - 0s 111us/step - loss: 0.6277 - accuracy: 0.6111 - val_loss: 0.6540 - val_accuracy: 0.6389
Epoch 47/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6274 - accuracy: 0.6111 - val_loss: 0.6539 - val_accuracy: 0.6389
Epoch 48/1000
108/108 [==============================] - 0s 83us/step - loss: 0.6272 - accuracy: 0.6019 - val_loss: 0.6537 - val_accuracy: 0.6389
Epoch 49/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6270 - accuracy: 0.6204 - val_loss: 0.6535 - val_accuracy: 0.6389
Epoch 50/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6266 - accuracy: 0.6204 - val_loss: 0.6531 - val_accuracy: 0.6389
Epoch 51/1000
108/108 [==============================] - 0s 93us/step - loss: 0.6263 - accuracy: 0.6111 - val_loss: 0.6526 - val_accuracy: 0.6389
Epoch 52/1000
108/108 [==============================] - 0s 83us/step - loss: 0.6261 - accuracy: 0.6111 - val_loss: 0.6521 - val_accuracy: 0.6389
Epoch 53/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6259 - accuracy: 0.6111 - val_loss: 0.6516 - val_accuracy: 0.6389

Epoch 00053: ReduceLROnPlateau reducing learning rate to 0.0001250000059371814.
Epoch 54/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6255 - accuracy: 0.6111 - val_loss: 0.6514 - val_accuracy: 0.6389
Epoch 55/1000
108/108 [==============================] - 0s 83us/step - loss: 0.6254 - accuracy: 0.6111 - val_loss: 0.6512 - val_accuracy: 0.6389
Epoch 56/1000
108/108 [==============================] - 0s 74us/step - loss: 0.6253 - accuracy: 0.6111 - val_loss: 0.6510 - val_accuracy: 0.6389
Epoch 57/1000
108/108 [==============================] - 0s 93us/step - loss: 0.6251 - accuracy: 0.6111 - val_loss: 0.6507 - val_accuracy: 0.6389
Epoch 58/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6250 - accuracy: 0.6111 - val_loss: 0.6505 - val_accuracy: 0.6389
Epoch 59/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6248 - accuracy: 0.6111 - val_loss: 0.6504 - val_accuracy: 0.6389
Epoch 60/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6247 - accuracy: 0.6111 - val_loss: 0.6502 - val_accuracy: 0.6389
Epoch 61/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6246 - accuracy: 0.6111 - val_loss: 0.6501 - val_accuracy: 0.6389
Epoch 62/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6244 - accuracy: 0.6204 - val_loss: 0.6498 - val_accuracy: 0.6389
Epoch 63/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6242 - accuracy: 0.6204 - val_loss: 0.6496 - val_accuracy: 0.6389

Epoch 00063: ReduceLROnPlateau reducing learning rate to 6.25000029685907e-05.
Epoch 64/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6241 - accuracy: 0.6204 - val_loss: 0.6495 - val_accuracy: 0.6389
Epoch 65/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6240 - accuracy: 0.6204 - val_loss: 0.6494 - val_accuracy: 0.6389
Epoch 66/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6239 - accuracy: 0.6204 - val_loss: 0.6493 - val_accuracy: 0.6389
Epoch 67/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6239 - accuracy: 0.6204 - val_loss: 0.6493 - val_accuracy: 0.6389
Epoch 68/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6238 - accuracy: 0.6204 - val_loss: 0.6492 - val_accuracy: 0.6389
Epoch 69/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6237 - accuracy: 0.6204 - val_loss: 0.6491 - val_accuracy: 0.6389
Epoch 70/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6237 - accuracy: 0.6204 - val_loss: 0.6491 - val_accuracy: 0.6389
Epoch 71/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6236 - accuracy: 0.6204 - val_loss: 0.6490 - val_accuracy: 0.6389
Epoch 72/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6235 - accuracy: 0.6204 - val_loss: 0.6490 - val_accuracy: 0.6389
Epoch 73/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6234 - accuracy: 0.6204 - val_loss: 0.6490 - val_accuracy: 0.6389

Epoch 00073: ReduceLROnPlateau reducing learning rate to 3.125000148429535e-05.
Epoch 74/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6234 - accuracy: 0.6204 - val_loss: 0.6490 - val_accuracy: 0.6389
Epoch 75/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6234 - accuracy: 0.6204 - val_loss: 0.6489 - val_accuracy: 0.6389
Epoch 76/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6233 - accuracy: 0.6204 - val_loss: 0.6489 - val_accuracy: 0.6389
Epoch 77/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6233 - accuracy: 0.6204 - val_loss: 0.6489 - val_accuracy: 0.6389
Epoch 78/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6233 - accuracy: 0.6204 - val_loss: 0.6488 - val_accuracy: 0.6389
Epoch 79/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6232 - accuracy: 0.6204 - val_loss: 0.6488 - val_accuracy: 0.6389
Epoch 80/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6232 - accuracy: 0.6204 - val_loss: 0.6488 - val_accuracy: 0.6389
Epoch 81/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6232 - accuracy: 0.6204 - val_loss: 0.6488 - val_accuracy: 0.6389
Epoch 82/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6231 - accuracy: 0.6204 - val_loss: 0.6488 - val_accuracy: 0.6389
Epoch 83/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6231 - accuracy: 0.6204 - val_loss: 0.6488 - val_accuracy: 0.6389

Epoch 00083: ReduceLROnPlateau reducing learning rate to 1.5625000742147677e-05.
Epoch 84/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6231 - accuracy: 0.6204 - val_loss: 0.6487 - val_accuracy: 0.6389
Epoch 85/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6230 - accuracy: 0.6204 - val_loss: 0.6487 - val_accuracy: 0.6389
Epoch 86/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6230 - accuracy: 0.6204 - val_loss: 0.6487 - val_accuracy: 0.6389
Epoch 87/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6230 - accuracy: 0.6204 - val_loss: 0.6487 - val_accuracy: 0.6389
Epoch 88/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6230 - accuracy: 0.6204 - val_loss: 0.6487 - val_accuracy: 0.6389
Epoch 89/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6230 - accuracy: 0.6204 - val_loss: 0.6487 - val_accuracy: 0.6389
Epoch 90/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6230 - accuracy: 0.6204 - val_loss: 0.6487 - val_accuracy: 0.6389
Epoch 91/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6229 - accuracy: 0.6204 - val_loss: 0.6487 - val_accuracy: 0.6389
Epoch 92/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6229 - accuracy: 0.6204 - val_loss: 0.6487 - val_accuracy: 0.6389
Epoch 93/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6229 - accuracy: 0.6204 - val_loss: 0.6487 - val_accuracy: 0.6389

Epoch 00093: ReduceLROnPlateau reducing learning rate to 7.812500371073838e-06.
Epoch 94/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6229 - accuracy: 0.6204 - val_loss: 0.6487 - val_accuracy: 0.6389
Epoch 95/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6229 - accuracy: 0.6204 - val_loss: 0.6487 - val_accuracy: 0.6389
Epoch 96/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6229 - accuracy: 0.6204 - val_loss: 0.6487 - val_accuracy: 0.6389
Epoch 97/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6229 - accuracy: 0.6204 - val_loss: 0.6487 - val_accuracy: 0.6389
Epoch 98/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6229 - accuracy: 0.6204 - val_loss: 0.6487 - val_accuracy: 0.6389
Epoch 99/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6228 - accuracy: 0.6204 - val_loss: 0.6487 - val_accuracy: 0.6389
Epoch 100/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6228 - accuracy: 0.6204 - val_loss: 0.6487 - val_accuracy: 0.6389
Epoch 101/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6228 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 102/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6228 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 103/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6228 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00103: ReduceLROnPlateau reducing learning rate to 3.906250185536919e-06.
Epoch 104/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6228 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 105/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6228 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 106/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6228 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 107/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6228 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 108/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6228 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 109/1000
108/108 [==============================] - ETA: 0s - loss: 0.6117 - accuracy: 0.65 - 0s 102us/step - loss: 0.6228 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 110/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6228 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 111/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6228 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 112/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6228 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 113/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6228 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00113: ReduceLROnPlateau reducing learning rate to 1.9531250927684596e-06.
Epoch 114/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6228 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 115/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6228 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 116/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6228 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 117/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6228 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 118/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 119/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 120/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 121/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 122/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 123/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00123: ReduceLROnPlateau reducing learning rate to 9.765625463842298e-07.
Epoch 124/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 125/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 126/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 127/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 128/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 129/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 130/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 131/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 132/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 133/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00133: ReduceLROnPlateau reducing learning rate to 4.882812731921149e-07.
Epoch 134/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 135/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 136/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 137/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 138/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 139/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 140/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 141/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 142/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 143/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00143: ReduceLROnPlateau reducing learning rate to 2.4414063659605745e-07.
Epoch 144/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 145/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 146/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 147/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 148/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 149/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 150/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 151/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 152/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 153/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00153: ReduceLROnPlateau reducing learning rate to 1.2207031829802872e-07.
Epoch 154/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 155/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 156/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 157/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 158/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 159/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 160/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 161/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 162/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 163/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00163: ReduceLROnPlateau reducing learning rate to 6.103515914901436e-08.
Epoch 164/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 165/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 166/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 167/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 168/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 169/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 170/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 171/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 172/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 173/1000
108/108 [==============================] - 0s 167us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00173: ReduceLROnPlateau reducing learning rate to 3.051757957450718e-08.
Epoch 174/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 175/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 176/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 177/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 178/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 179/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 180/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 181/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 182/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 183/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00183: ReduceLROnPlateau reducing learning rate to 1.525878978725359e-08.
Epoch 184/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 185/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 186/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 187/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 188/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 189/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 190/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 191/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 192/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 193/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00193: ReduceLROnPlateau reducing learning rate to 7.629394893626795e-09.
Epoch 194/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 195/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 196/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 197/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 198/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 199/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 200/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 201/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 202/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 203/1000
108/108 [==============================] - ETA: 0s - loss: 0.6057 - accuracy: 0.59 - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00203: ReduceLROnPlateau reducing learning rate to 3.814697446813398e-09.
Epoch 204/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 205/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 206/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 207/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 208/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 209/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 210/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 211/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 212/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 213/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00213: ReduceLROnPlateau reducing learning rate to 1.907348723406699e-09.
Epoch 214/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 215/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 216/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 217/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 218/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 219/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 220/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 221/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 222/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 223/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00223: ReduceLROnPlateau reducing learning rate to 9.536743617033494e-10.
Epoch 224/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 225/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 226/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 227/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 228/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 229/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 230/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 231/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 232/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 233/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00233: ReduceLROnPlateau reducing learning rate to 4.768371808516747e-10.
Epoch 234/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 235/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 236/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 237/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 238/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 239/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 240/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 241/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 242/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 243/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00243: ReduceLROnPlateau reducing learning rate to 2.3841859042583735e-10.
Epoch 244/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 245/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 246/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 247/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 248/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 249/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 250/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 251/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 252/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 253/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00253: ReduceLROnPlateau reducing learning rate to 1.1920929521291868e-10.
Epoch 254/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 255/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 256/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 257/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 258/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 259/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 260/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 261/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 262/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 263/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00263: ReduceLROnPlateau reducing learning rate to 5.960464760645934e-11.
Epoch 264/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 265/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 266/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 267/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 268/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 269/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 270/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 271/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 272/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 273/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00273: ReduceLROnPlateau reducing learning rate to 2.980232380322967e-11.
Epoch 274/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 275/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 276/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 277/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 278/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 279/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 280/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 281/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 282/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 283/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00283: ReduceLROnPlateau reducing learning rate to 1.4901161901614834e-11.
Epoch 284/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 285/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 286/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 287/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 288/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 289/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 290/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 291/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 292/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 293/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00293: ReduceLROnPlateau reducing learning rate to 7.450580950807417e-12.
Epoch 294/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 295/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 296/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 297/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 298/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 299/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 300/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 301/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 302/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 303/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00303: ReduceLROnPlateau reducing learning rate to 3.725290475403709e-12.
Epoch 304/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 305/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 306/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 307/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 308/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 309/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 310/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 311/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 312/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 313/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00313: ReduceLROnPlateau reducing learning rate to 1.8626452377018543e-12.
Epoch 314/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 315/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 316/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 317/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 318/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 319/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 320/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 321/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 322/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 323/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00323: ReduceLROnPlateau reducing learning rate to 9.313226188509272e-13.
Epoch 324/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 325/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 326/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 327/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 328/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 329/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 330/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 331/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 332/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 333/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00333: ReduceLROnPlateau reducing learning rate to 4.656613094254636e-13.
Epoch 334/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 335/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 336/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 337/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 338/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 339/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 340/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 341/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 342/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 343/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00343: ReduceLROnPlateau reducing learning rate to 2.328306547127318e-13.
Epoch 344/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 345/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 346/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 347/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 348/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 349/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 350/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 351/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 352/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 353/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00353: ReduceLROnPlateau reducing learning rate to 1.164153273563659e-13.
Epoch 354/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 355/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 356/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 357/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 358/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 359/1000
108/108 [==============================] - 0s 93us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 360/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 361/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 362/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 363/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00363: ReduceLROnPlateau reducing learning rate to 5.820766367818295e-14.
Epoch 364/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 365/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 366/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 367/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 368/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 369/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 370/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 371/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 372/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 373/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00373: ReduceLROnPlateau reducing learning rate to 2.9103831839091474e-14.
Epoch 374/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 375/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 376/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 377/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 378/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 379/1000
108/108 [==============================] - 0s 167us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 380/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 381/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 382/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 383/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00383: ReduceLROnPlateau reducing learning rate to 1.4551915919545737e-14.
Epoch 384/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 385/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 386/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 387/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 388/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 389/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 390/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 391/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 392/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 393/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00393: ReduceLROnPlateau reducing learning rate to 7.275957959772868e-15.
Epoch 394/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 395/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 396/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 397/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 398/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 399/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 400/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 401/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 402/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 403/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00403: ReduceLROnPlateau reducing learning rate to 3.637978979886434e-15.
Epoch 404/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 405/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 406/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 407/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 408/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 409/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 410/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 411/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 412/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 413/1000
108/108 [==============================] - 0s 176us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00413: ReduceLROnPlateau reducing learning rate to 1.818989489943217e-15.
Epoch 414/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 415/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 416/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 417/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 418/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 419/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 420/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 421/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 422/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 423/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00423: ReduceLROnPlateau reducing learning rate to 9.094947449716085e-16.
Epoch 424/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 425/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 426/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 427/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 428/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 429/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 430/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 431/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 432/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 433/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00433: ReduceLROnPlateau reducing learning rate to 4.547473724858043e-16.
Epoch 434/1000
108/108 [==============================] - 0s 93us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 435/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 436/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 437/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 438/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 439/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 440/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 441/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 442/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 443/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00443: ReduceLROnPlateau reducing learning rate to 2.2737368624290214e-16.
Epoch 444/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 445/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 446/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 447/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 448/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 449/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 450/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 451/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 452/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 453/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00453: ReduceLROnPlateau reducing learning rate to 1.1368684312145107e-16.
Epoch 454/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 455/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 456/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 457/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 458/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 459/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 460/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 461/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 462/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 463/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00463: ReduceLROnPlateau reducing learning rate to 5.684342156072553e-17.
Epoch 464/1000
108/108 [==============================] - 0s 93us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 465/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 466/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 467/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 468/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 469/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 470/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 471/1000
108/108 [==============================] - 0s 93us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 472/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 473/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00473: ReduceLROnPlateau reducing learning rate to 2.842171078036277e-17.
Epoch 474/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 475/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 476/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 477/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 478/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 479/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 480/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 481/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 482/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 483/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00483: ReduceLROnPlateau reducing learning rate to 1.4210855390181384e-17.
Epoch 484/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 485/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 486/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 487/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 488/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 489/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 490/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 491/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 492/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 493/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00493: ReduceLROnPlateau reducing learning rate to 7.105427695090692e-18.
Epoch 494/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 495/1000
108/108 [==============================] - 0s 93us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 496/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 497/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 498/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 499/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 500/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 501/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 502/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 503/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00503: ReduceLROnPlateau reducing learning rate to 3.552713847545346e-18.
Epoch 504/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 505/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 506/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 507/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 508/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 509/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 510/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 511/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 512/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 513/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00513: ReduceLROnPlateau reducing learning rate to 1.776356923772673e-18.
Epoch 514/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 515/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 516/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 517/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 518/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 519/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 520/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 521/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 522/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 523/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00523: ReduceLROnPlateau reducing learning rate to 8.881784618863365e-19.
Epoch 524/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 525/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 526/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 527/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 528/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 529/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 530/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 531/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 532/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 533/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00533: ReduceLROnPlateau reducing learning rate to 4.440892309431682e-19.
Epoch 534/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 535/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 536/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 537/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 538/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 539/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 540/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 541/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 542/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 543/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00543: ReduceLROnPlateau reducing learning rate to 2.220446154715841e-19.
Epoch 544/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 545/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 546/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 547/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 548/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 549/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 550/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 551/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 552/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 553/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00553: ReduceLROnPlateau reducing learning rate to 1.1102230773579206e-19.
Epoch 554/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 555/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 556/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 557/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 558/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 559/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 560/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 561/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 562/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 563/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00563: ReduceLROnPlateau reducing learning rate to 5.551115386789603e-20.
Epoch 564/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 565/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 566/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 567/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 568/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 569/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 570/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 571/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 572/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 573/1000
108/108 [==============================] - 0s 93us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00573: ReduceLROnPlateau reducing learning rate to 2.7755576933948015e-20.
Epoch 574/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 575/1000
108/108 [==============================] - 0s 93us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 576/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 577/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 578/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 579/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 580/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 581/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 582/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 583/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00583: ReduceLROnPlateau reducing learning rate to 1.3877788466974007e-20.
Epoch 584/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 585/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 586/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 587/1000
108/108 [==============================] - 0s 185us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 588/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 589/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 590/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 591/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 592/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 593/1000
108/108 [==============================] - 0s 167us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00593: ReduceLROnPlateau reducing learning rate to 6.938894233487004e-21.
Epoch 594/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 595/1000
108/108 [==============================] - 0s 176us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 596/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 597/1000
108/108 [==============================] - 0s 167us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 598/1000
108/108 [==============================] - 0s 185us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 599/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 600/1000
108/108 [==============================] - 0s 222us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 601/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 602/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 603/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00603: ReduceLROnPlateau reducing learning rate to 3.469447116743502e-21.
Epoch 604/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 605/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 606/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 607/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 608/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 609/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 610/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 611/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 612/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 613/1000
108/108 [==============================] - 0s 167us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00613: ReduceLROnPlateau reducing learning rate to 1.734723558371751e-21.
Epoch 614/1000
108/108 [==============================] - 0s 204us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 615/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 616/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 617/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 618/1000
108/108 [==============================] - 0s 194us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 619/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 620/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 621/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 622/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 623/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00623: ReduceLROnPlateau reducing learning rate to 8.673617791858755e-22.
Epoch 624/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 625/1000
108/108 [==============================] - 0s 167us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 626/1000
108/108 [==============================] - ETA: 0s - loss: 0.5830 - accuracy: 0.68 - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 627/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 628/1000
108/108 [==============================] - 0s 185us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 629/1000
108/108 [==============================] - 0s 167us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 630/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 631/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 632/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 633/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00633: ReduceLROnPlateau reducing learning rate to 4.336808895929377e-22.
Epoch 634/1000
108/108 [==============================] - 0s 167us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 635/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 636/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 637/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 638/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 639/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 640/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 641/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 642/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 643/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00643: ReduceLROnPlateau reducing learning rate to 2.1684044479646887e-22.
Epoch 644/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 645/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 646/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 647/1000
108/108 [==============================] - 0s 416us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 648/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 649/1000
108/108 [==============================] - ETA: 0s - loss: 0.6038 - accuracy: 0.68 - 0s 342us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 650/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 651/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 652/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 653/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00653: ReduceLROnPlateau reducing learning rate to 1.0842022239823443e-22.
Epoch 654/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 655/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 656/1000
108/108 [==============================] - 0s 167us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 657/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 658/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 659/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 660/1000
108/108 [==============================] - 0s 176us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 661/1000
108/108 [==============================] - 0s 176us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 662/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 663/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00663: ReduceLROnPlateau reducing learning rate to 5.421011119911722e-23.
Epoch 664/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 665/1000
108/108 [==============================] - 0s 176us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 666/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 667/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 668/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 669/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 670/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 671/1000
108/108 [==============================] - 0s 167us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 672/1000
108/108 [==============================] - 0s 167us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 673/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00673: ReduceLROnPlateau reducing learning rate to 2.710505559955861e-23.
Epoch 674/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 675/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 676/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 677/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 678/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 679/1000
108/108 [==============================] - 0s 176us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 680/1000
108/108 [==============================] - 0s 324us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 681/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 682/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 683/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00683: ReduceLROnPlateau reducing learning rate to 1.3552527799779304e-23.
Epoch 684/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 685/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 686/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 687/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 688/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 689/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 690/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 691/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 692/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 693/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00693: ReduceLROnPlateau reducing learning rate to 6.776263899889652e-24.
Epoch 694/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 695/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 696/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 697/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 698/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 699/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 700/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 701/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 702/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 703/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00703: ReduceLROnPlateau reducing learning rate to 3.388131949944826e-24.
Epoch 704/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 705/1000
108/108 [==============================] - 0s 305us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 706/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 707/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 708/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 709/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 710/1000
108/108 [==============================] - 0s 167us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 711/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 712/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 713/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00713: ReduceLROnPlateau reducing learning rate to 1.694065974972413e-24.
Epoch 714/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 715/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 716/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 717/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 718/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 719/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 720/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 721/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 722/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 723/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00723: ReduceLROnPlateau reducing learning rate to 8.470329874862065e-25.
Epoch 724/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 725/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 726/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 727/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 728/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 729/1000
108/108 [==============================] - 0s 176us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 730/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 731/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 732/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 733/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00733: ReduceLROnPlateau reducing learning rate to 4.2351649374310325e-25.
Epoch 734/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 735/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 736/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 737/1000
108/108 [==============================] - 0s 167us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 738/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 739/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 740/1000
108/108 [==============================] - 0s 185us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 741/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 742/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 743/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00743: ReduceLROnPlateau reducing learning rate to 2.1175824687155163e-25.
Epoch 744/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 745/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 746/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 747/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 748/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 749/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 750/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 751/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 752/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 753/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00753: ReduceLROnPlateau reducing learning rate to 1.0587912343577581e-25.
Epoch 754/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 755/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 756/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 757/1000
108/108 [==============================] - ETA: 0s - loss: 0.6535 - accuracy: 0.56 - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 758/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 759/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 760/1000
108/108 [==============================] - 0s 194us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 761/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 762/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 763/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00763: ReduceLROnPlateau reducing learning rate to 5.293956171788791e-26.
Epoch 764/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 765/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 766/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 767/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 768/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 769/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 770/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 771/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 772/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 773/1000
108/108 [==============================] - 0s 167us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00773: ReduceLROnPlateau reducing learning rate to 2.6469780858943953e-26.
Epoch 774/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 775/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 776/1000
108/108 [==============================] - 0s 176us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 777/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 778/1000
108/108 [==============================] - 0s 167us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 779/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 780/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 781/1000
108/108 [==============================] - 0s 185us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 782/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 783/1000
108/108 [==============================] - 0s 315us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00783: ReduceLROnPlateau reducing learning rate to 1.3234890429471977e-26.
Epoch 784/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 785/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 786/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 787/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 788/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 789/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 790/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 791/1000
108/108 [==============================] - 0s 167us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 792/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 793/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00793: ReduceLROnPlateau reducing learning rate to 6.617445214735988e-27.
Epoch 794/1000
108/108 [==============================] - 0s 213us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 795/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 796/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 797/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 798/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 799/1000
108/108 [==============================] - ETA: 0s - loss: 0.5922 - accuracy: 0.62 - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 800/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 801/1000
108/108 [==============================] - 0s 194us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 802/1000
108/108 [==============================] - 0s 176us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 803/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00803: ReduceLROnPlateau reducing learning rate to 3.308722607367994e-27.
Epoch 804/1000
108/108 [==============================] - 0s 194us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 805/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 806/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 807/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 808/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 809/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 810/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 811/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 812/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 813/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00813: ReduceLROnPlateau reducing learning rate to 1.654361303683997e-27.
Epoch 814/1000
108/108 [==============================] - 0s 93us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 815/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 816/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 817/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 818/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 819/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 820/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 821/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 822/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 823/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00823: ReduceLROnPlateau reducing learning rate to 8.271806518419985e-28.
Epoch 824/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 825/1000
108/108 [==============================] - 0s 93us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 826/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 827/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 828/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 829/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 830/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 831/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 832/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 833/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00833: ReduceLROnPlateau reducing learning rate to 4.135903259209993e-28.
Epoch 834/1000
108/108 [==============================] - ETA: 0s - loss: 0.6172 - accuracy: 0.65 - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 835/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 836/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 837/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 838/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 839/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 840/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 841/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 842/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 843/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00843: ReduceLROnPlateau reducing learning rate to 2.0679516296049964e-28.
Epoch 844/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 845/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 846/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 847/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 848/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 849/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 850/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 851/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 852/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 853/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00853: ReduceLROnPlateau reducing learning rate to 1.0339758148024982e-28.
Epoch 854/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 855/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 856/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 857/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 858/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 859/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 860/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 861/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 862/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 863/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00863: ReduceLROnPlateau reducing learning rate to 5.169879074012491e-29.
Epoch 864/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 865/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 866/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 867/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 868/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 869/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 870/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 871/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 872/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 873/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00873: ReduceLROnPlateau reducing learning rate to 2.5849395370062454e-29.
Epoch 874/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 875/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 876/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 877/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 878/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 879/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 880/1000
108/108 [==============================] - 0s 167us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 881/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 882/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 883/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00883: ReduceLROnPlateau reducing learning rate to 1.2924697685031227e-29.
Epoch 884/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 885/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 886/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 887/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 888/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 889/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 890/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 891/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 892/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 893/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00893: ReduceLROnPlateau reducing learning rate to 6.462348842515614e-30.
Epoch 894/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 895/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 896/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 897/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 898/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 899/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 900/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 901/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 902/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 903/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00903: ReduceLROnPlateau reducing learning rate to 3.231174421257807e-30.
Epoch 904/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 905/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 906/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 907/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 908/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 909/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 910/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 911/1000
108/108 [==============================] - 0s 167us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 912/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 913/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00913: ReduceLROnPlateau reducing learning rate to 1.6155872106289034e-30.
Epoch 914/1000
108/108 [==============================] - 0s 185us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 915/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 916/1000
108/108 [==============================] - 0s 157us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 917/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 918/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 919/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 920/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 921/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 922/1000
108/108 [==============================] - 0s 185us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 923/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00923: ReduceLROnPlateau reducing learning rate to 8.077936053144517e-31.
Epoch 924/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 925/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 926/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 927/1000
108/108 [==============================] - ETA: 0s - loss: 0.6531 - accuracy: 0.53 - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 928/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 929/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 930/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 931/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 932/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 933/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00933: ReduceLROnPlateau reducing learning rate to 4.0389680265722585e-31.
Epoch 934/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 935/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 936/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 937/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 938/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 939/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 940/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 941/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 942/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 943/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00943: ReduceLROnPlateau reducing learning rate to 2.0194840132861292e-31.
Epoch 944/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 945/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 946/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 947/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 948/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 949/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 950/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 951/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 952/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 953/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00953: ReduceLROnPlateau reducing learning rate to 1.0097420066430646e-31.
Epoch 954/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 955/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 956/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 957/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 958/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 959/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 960/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 961/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 962/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 963/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00963: ReduceLROnPlateau reducing learning rate to 5.048710033215323e-32.
Epoch 964/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 965/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 966/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 967/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 968/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 969/1000
108/108 [==============================] - 0s 167us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 970/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 971/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 972/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 973/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00973: ReduceLROnPlateau reducing learning rate to 2.5243550166076616e-32.
Epoch 974/1000
108/108 [==============================] - 0s 102us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 975/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 976/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 977/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 978/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 979/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 980/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 981/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 982/1000
108/108 [==============================] - 0s 148us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 983/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00983: ReduceLROnPlateau reducing learning rate to 1.2621775083038308e-32.
Epoch 984/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 985/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 986/1000
108/108 [==============================] - ETA: 0s - loss: 0.6357 - accuracy: 0.62 - 0s 167us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 987/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 988/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 989/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 990/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 991/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 992/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 993/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389

Epoch 00993: ReduceLROnPlateau reducing learning rate to 6.310887541519154e-33.
Epoch 994/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 995/1000
108/108 [==============================] - 0s 130us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 996/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 997/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 998/1000
108/108 [==============================] - 0s 139us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 999/1000
108/108 [==============================] - 0s 120us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
Epoch 1000/1000
108/108 [==============================] - 0s 111us/step - loss: 0.6227 - accuracy: 0.6204 - val_loss: 0.6486 - val_accuracy: 0.6389
In [321]:
# Pull the per-epoch training curves recorded by model.fit().
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))


def _plot_metric(train_vals, val_vals, label, title_metric):
    """Plot training (dots) vs. validation (line) curves for one metric.

    train_vals / val_vals: per-epoch values from history.history.
    label: short name used in the legend (e.g. 'acc').
    title_metric: long name used in the title (e.g. 'accuracy').
    """
    plt.plot(epochs, train_vals, 'bo', label='Training ' + label)
    plt.plot(epochs, val_vals, 'b', label='Validation ' + label)
    plt.title('Training and validation ' + title_metric)
    plt.xlabel('Epoch')
    plt.ylabel(title_metric)
    plt.legend()
    plt.show()


# One helper, two calls — replaces the previous copy-pasted plotting code
# (the stray debug print of `epochs` is also removed).
_plot_metric(acc, val_acc, 'acc', 'accuracy')
_plot_metric(loss, val_loss, 'loss', 'loss')
range(0, 1000)
In [322]:
# Score the trained model on the held-out test set; Keras returns
# [loss, accuracy] in the order the metrics were compiled.
evaluation = model.evaluate(X_test, y_test)
test_loss, test_acc = evaluation
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
36/36 [==============================] - 0s 111us/step
test loss: 0.648619532585144, test accuracy: 0.6388888955116272
In [323]:
# Predicted probabilities for the test set; ROC AUC is threshold-free,
# so it is computed on the raw scores before any binarization.
y_pred = model.predict(X_test)
auc = roc_auc_score(y_test, y_pred)
print("AUC ROC: ", auc)
AUC ROC:  0.684375
In [324]:
# Binarize the predicted probabilities at the conventional 0.5 threshold.
# np.ravel flattens the (n, 1) prediction array so each element is a plain
# scalar — the old `int(i >= 0.5)` on a size-1 sub-array relied on implicit
# ndarray->int conversion, which is deprecated in modern NumPy.
# NOTE: this rebinds y_pred from probabilities to hard 0/1 labels, so the
# cell is not idempotent with respect to the previous cell's y_pred.
y_pred = [int(p >= 0.5) for p in np.ravel(y_pred)]
print("Kappa: ", cohen_kappa_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
Kappa:  0.22516556291390732
[[18  2]
 [11  5]]

KMeans

In [490]:
X
Out[490]:
chromagramfiles_1 chromagramfiles_2 chromagramfiles_3 chromagramfiles_4 chromagramfiles_5 chromagramfiles_6 chromagramfiles_7 chromagramfiles_8 chromagramfiles_9 chromagramfiles_10 chromagramfiles_11 chromagramfiles_12
0 -0.580880 -0.990276 0.508438 0.658218 1.668751 -0.289029 -0.581158 1.476860 0.248923 1.428276 -0.870636 -0.174217
1 -0.886270 1.680400 1.708314 0.223308 0.928680 -0.795001 0.621735 -0.834425 -0.922785 -0.428866 -1.169994 -0.427540
2 -0.133384 0.811454 -1.227033 -0.981611 -1.054713 -1.027505 -0.745868 0.198884 0.508413 -1.114895 -0.312476 1.844563
3 -0.551071 0.259668 -1.073758 -0.208557 0.002105 -0.655399 -0.056661 -1.206025 -1.133355 -1.217881 -0.812411 1.844563
4 -1.234016 -0.118636 -1.227033 -0.725200 -1.112673 -0.992824 -0.809889 -1.428072 -1.315307 -1.379943 -0.903603 1.844563
5 1.505233 0.156720 1.279534 0.976338 0.335058 1.153170 -0.342400 -0.203256 -0.952802 -0.844743 1.303172 -0.087828
6 1.505233 -0.276775 0.478146 0.610910 1.357200 2.163626 0.746385 -0.601441 -0.891301 -0.864699 0.723335 -0.218435
7 -1.098308 2.406593 0.908371 -0.871045 1.373331 -0.134337 2.214860 -0.690075 0.702474 -0.659995 -0.671389 -0.457981
8 1.505233 0.396108 -0.186683 -0.340118 -1.050443 -0.431777 -0.808361 -0.443038 -1.175635 -1.295109 0.226236 -0.613691
9 -0.545948 0.718771 -0.767661 -0.855150 0.352508 -0.644761 0.330241 0.685662 0.593385 1.260081 0.858576 1.844563
10 -0.759467 1.402120 1.708315 1.252125 0.573961 0.248571 1.668183 -0.726951 -0.188602 -0.002148 -0.093469 0.787068
11 -0.626015 2.019019 0.765623 1.228928 2.278317 0.591124 2.703718 -0.377378 1.164753 -0.756752 -0.671882 -0.027193
12 -0.707480 0.753456 1.102092 -0.728084 0.729813 -0.794273 -0.114544 0.663648 1.218463 1.428276 0.637503 0.904412
13 0.053762 1.374763 -0.064790 -0.121229 -0.205398 -0.641216 1.872586 -0.187941 1.498055 0.280727 0.185105 -0.176726
14 -1.007854 1.946086 0.162150 -0.040380 1.209596 -0.283561 2.703718 -0.479048 1.139185 -0.094942 -0.241802 -0.230658
15 -1.148500 -1.038639 1.466504 -0.903609 0.186581 -0.985592 -0.582354 2.493864 -0.658620 -0.409259 -1.142408 -0.002720
16 0.580515 0.408966 0.759067 -1.001060 0.697356 -1.027505 0.420872 -1.192638 -1.046819 1.428276 -1.213118 -0.620821
17 0.828864 -0.024616 1.708315 0.336834 2.119154 -0.083648 -0.260916 0.125194 -0.252140 0.315916 -0.554175 1.271464
18 -1.323765 -1.071100 -1.227033 -1.001060 -0.802220 -0.814440 -0.902727 -1.012315 -0.180743 1.428276 -1.211184 -1.197199
19 1.258380 -0.980344 -1.169455 -0.895307 0.784365 0.582611 -0.533001 0.319883 -1.027202 1.428276 -1.014103 0.265376
20 0.584754 2.257694 1.617268 2.780448 0.474336 0.004282 -0.015547 -1.203892 -1.203690 -1.381076 -1.170062 -0.610268
21 0.872730 2.337993 1.708314 -0.492713 -0.703382 -0.834409 0.298125 -0.068247 0.445576 1.226208 -1.003116 -0.229748
22 1.505233 -0.325266 0.239552 -0.911432 0.097951 -0.791099 -0.494947 1.831184 -0.367337 0.094547 -0.372461 1.678056
23 -1.341382 1.475366 1.708314 -0.430421 -0.137473 -0.924961 0.497402 -1.323459 -1.068884 1.412572 -1.166306 -1.088362
24 1.505233 -0.058685 1.279528 0.485516 -0.774672 -0.465361 -0.570279 -0.818106 -0.541573 -0.742412 -0.782598 0.830590
25 1.505233 -0.199351 1.568596 1.912238 -0.689720 -0.366603 -0.547507 0.350096 0.290752 -0.505127 0.813170 0.107592
26 1.084848 -0.023978 1.708314 2.367620 -0.203979 1.003510 0.238525 0.369125 0.271989 -0.249002 -0.148803 1.184233
27 0.552717 0.139915 0.547890 -0.898140 0.438535 -0.781040 0.355213 2.187005 -0.064105 0.877837 -0.435347 1.844563
28 -0.960017 -0.983960 -1.209972 -0.300931 -1.083476 -0.784556 0.184926 -0.959029 0.169592 -0.457636 0.416267 1.844563
29 -0.424404 1.136447 -0.827502 1.506323 -0.630661 0.988075 1.211921 -0.324148 1.498055 -0.626101 1.321091 1.289677
... ... ... ... ... ... ... ... ... ... ... ... ...
114 0.974362 -0.400418 0.642886 -0.973136 -0.705590 -0.422351 -0.641737 -0.124930 -0.610955 1.428276 -0.948151 -1.083631
115 0.284279 1.069626 0.177590 0.656479 0.265933 1.260127 0.211181 1.479561 1.498055 -0.348265 0.959183 -0.262467
116 0.401154 -0.702442 -1.068462 -0.829697 -0.787061 -0.712610 1.069084 -0.995528 -0.521572 1.092917 0.094596 1.844563
117 -0.392111 -0.441150 -0.850725 -0.730060 -0.590334 -0.810149 -0.085415 -0.255326 0.062980 1.428276 0.100370 1.127543
118 -1.167027 -0.128653 -0.576682 -0.857585 -0.875277 0.070585 2.703717 -1.315942 -1.242368 -0.230806 -0.934449 0.966999
119 -1.453231 -1.071100 -1.212986 -0.993891 -1.111719 0.192760 -0.824082 -0.543882 1.498055 -1.097875 -1.213118 -1.155045
120 -1.054909 -1.071100 0.809406 -1.001060 2.502299 1.963690 -0.902727 -1.387547 -1.268177 1.428276 -0.575983 -0.540965
121 -1.120738 -1.071100 -0.866114 -0.865353 2.514969 -1.021752 -0.889346 -1.354656 -1.264228 0.155874 -0.975986 -1.133896
122 -0.737469 -0.066958 1.708314 0.402373 0.024144 0.762426 -0.634542 1.630447 -0.974569 0.125146 0.555123 -0.717956
123 0.442153 -0.009588 1.301573 0.189897 0.126226 -0.685488 -0.235148 1.449126 -0.770793 1.428276 0.338088 0.077417
124 0.565085 -0.109187 1.708314 -0.270542 -0.174892 -0.875584 -0.663332 -0.616726 -1.041169 0.001534 -0.452279 -0.778143
125 1.007454 1.212228 1.708315 0.655560 0.997159 -0.014497 1.717990 0.884663 0.403666 0.774716 0.552503 1.696155
126 1.289118 0.354850 0.489878 0.491648 0.353546 -0.118255 0.937431 1.341005 1.033868 1.428276 0.501331 1.821758
127 0.028566 -0.686803 -0.979367 -0.791220 0.101396 -0.907919 -0.345653 -0.965689 -0.310560 0.141809 0.026242 1.844563
128 0.678311 -0.817132 1.446362 0.622042 0.636181 0.072008 -0.069805 -1.132198 -1.261564 -1.058991 -0.259138 1.844563
129 -0.163049 -0.571203 1.708314 0.661019 0.470865 0.046292 -0.374436 -0.995925 -1.290693 -1.171613 -0.486004 1.320617
130 -0.679135 -1.071100 1.708314 1.187633 -0.983778 0.166812 -0.902727 -0.804054 -1.328500 -1.408130 0.096574 -1.239435
131 -0.204732 -1.050977 0.143360 -1.000482 -1.121646 -0.314462 -0.902727 0.206977 0.418859 1.428276 1.018850 -0.993493
132 -0.966699 -1.071100 1.467246 0.266055 -1.121646 -0.920149 -0.902727 0.092987 0.417098 1.428276 1.104800 -1.136694
133 0.013654 2.406593 -0.420557 -0.020562 0.100086 -0.987233 -0.324993 -1.008730 0.790985 1.379950 0.681674 0.248593
134 0.770196 2.406593 -0.756682 -0.547320 -0.185781 -0.026969 -0.879742 -1.399022 -0.329956 0.189407 0.180515 1.255243
135 0.316774 1.171862 0.453349 0.986569 0.390405 -0.205625 0.010260 0.507765 1.182053 1.363422 1.911782 1.166586
136 0.384484 0.519829 -0.388746 0.502793 -0.676553 1.739251 -0.282937 -0.553546 0.908221 0.045664 1.911782 -0.766194
137 -1.084993 0.429049 -0.688647 -0.802982 -0.789349 -0.553963 0.156609 1.547841 1.498055 0.119488 1.636521 -0.946370
138 0.980726 1.839717 1.338253 1.957363 2.514969 0.934743 -0.841064 -1.071315 -1.328500 -1.401581 -1.188112 -1.233483
139 -1.071701 -1.071012 -0.961961 -0.905451 2.339631 0.389698 0.308230 0.057114 -0.497016 0.228955 -0.961941 1.844563
140 -0.415164 -1.071100 -1.221501 -0.693006 2.514969 -1.009569 1.155568 -0.071769 -1.197183 -1.374479 -1.213118 0.168259
141 -1.200546 -1.040557 0.160549 -0.991650 -0.803943 -0.827445 0.797509 1.475607 0.141845 1.428276 -0.383061 0.771996
142 1.505233 0.201458 0.615256 -0.813296 -0.041980 1.737065 -0.019083 0.530116 0.175767 1.099664 -0.498466 0.042633
143 -0.236120 -0.146675 -0.110274 2.342766 0.998147 2.465626 -0.067593 -0.182221 -0.463531 -0.779670 0.565948 -0.276677

144 rows × 12 columns

In [491]:
# Elbow method: record the within-cluster sum of squares (inertia)
# for k = 1 .. 14 so we can pick a cluster count visually.
WSSs = []
for n_clusters in range(1, 15):
    km = KMeans(n_clusters=n_clusters, random_state=0).fit(X)
    WSSs.append(km.inertia_)
WSSs
Out[491]:
[1728.0,
 1478.2106072382894,
 1332.479257330826,
 1218.449951308628,
 1138.0957918080462,
 1068.887842720851,
 1011.6664613063779,
 964.4282803174967,
 921.5196331917493,
 895.2545968491829,
 849.4045014782471,
 823.1536172247521,
 793.2889327408209,
 759.9213103718073]
In [492]:
# Elbow plot: look for the "knee" where adding clusters stops paying off.
# Explicit fig/ax interface plus title and axis labels so the figure stands alone.
fig, ax = plt.subplots(figsize=(12, 12))
ax.plot(range(1, 15), WSSs, marker='o')
ax.set(title='Elbow method', xlabel='Number of clusters (k)',
       ylabel='Within-cluster sum of squares (inertia)')
Out[492]:
[<matplotlib.lines.Line2D at 0x1b83165ae10>]

Based on the elbow in the inertia curve above, we choose K = 2 clusters.

In [493]:
# Fit K-Means with K=2 (chosen from the elbow plot above).
# Fixed seed and n_init=10 restarts for a reproducible, stable solution.
kmeans_ch = KMeans(n_clusters=2, random_state=0, n_init=10)
kmeans_ch.fit(X)
Out[493]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=2, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [494]:
# Cluster assignment (0/1) learned for each sample during fit.
kmeans_ch.labels_
Out[494]:
array([1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0,
       1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0,
       0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1,
       1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1,
       1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1,
       0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1,
       1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0])
In [495]:
# For K-Means, predicting on the data it was fitted on reproduces `labels_`
# (the identical Out[494]/Out[495] arrays confirm this here), so reuse the
# fitted labels instead of recomputing distances to the centroids.
clusters_ch = kmeans_ch.labels_
clusters_ch
Out[495]:
array([1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0,
       1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0,
       0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1,
       1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1,
       1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1,
       0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1,
       1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0])
In [496]:
# Attach the cluster assignment and the ground-truth label to the feature frame.
# `assign` returns a new frame instead of mutating X in place, which avoids a
# SettingWithCopyWarning when X is a view of the original standardized data.
X = X.assign(Cluster=clusters_ch, chosen=list(y))
In [497]:
# Inspect the augmented frame (12 chromagram features + Cluster + chosen).
X
Out[497]:
chromagramfiles_1 chromagramfiles_2 chromagramfiles_3 chromagramfiles_4 chromagramfiles_5 chromagramfiles_6 chromagramfiles_7 chromagramfiles_8 chromagramfiles_9 chromagramfiles_10 chromagramfiles_11 chromagramfiles_12 Cluster chosen
0 -0.580880 -0.990276 0.508438 0.658218 1.668751 -0.289029 -0.581158 1.476860 0.248923 1.428276 -0.870636 -0.174217 1 0
1 -0.886270 1.680400 1.708314 0.223308 0.928680 -0.795001 0.621735 -0.834425 -0.922785 -0.428866 -1.169994 -0.427540 0 0
2 -0.133384 0.811454 -1.227033 -0.981611 -1.054713 -1.027505 -0.745868 0.198884 0.508413 -1.114895 -0.312476 1.844563 1 0
3 -0.551071 0.259668 -1.073758 -0.208557 0.002105 -0.655399 -0.056661 -1.206025 -1.133355 -1.217881 -0.812411 1.844563 1 0
4 -1.234016 -0.118636 -1.227033 -0.725200 -1.112673 -0.992824 -0.809889 -1.428072 -1.315307 -1.379943 -0.903603 1.844563 1 0
5 1.505233 0.156720 1.279534 0.976338 0.335058 1.153170 -0.342400 -0.203256 -0.952802 -0.844743 1.303172 -0.087828 0 0
6 1.505233 -0.276775 0.478146 0.610910 1.357200 2.163626 0.746385 -0.601441 -0.891301 -0.864699 0.723335 -0.218435 0 0
7 -1.098308 2.406593 0.908371 -0.871045 1.373331 -0.134337 2.214860 -0.690075 0.702474 -0.659995 -0.671389 -0.457981 0 0
8 1.505233 0.396108 -0.186683 -0.340118 -1.050443 -0.431777 -0.808361 -0.443038 -1.175635 -1.295109 0.226236 -0.613691 1 0
9 -0.545948 0.718771 -0.767661 -0.855150 0.352508 -0.644761 0.330241 0.685662 0.593385 1.260081 0.858576 1.844563 1 0
10 -0.759467 1.402120 1.708315 1.252125 0.573961 0.248571 1.668183 -0.726951 -0.188602 -0.002148 -0.093469 0.787068 0 0
11 -0.626015 2.019019 0.765623 1.228928 2.278317 0.591124 2.703718 -0.377378 1.164753 -0.756752 -0.671882 -0.027193 0 0
12 -0.707480 0.753456 1.102092 -0.728084 0.729813 -0.794273 -0.114544 0.663648 1.218463 1.428276 0.637503 0.904412 1 0
13 0.053762 1.374763 -0.064790 -0.121229 -0.205398 -0.641216 1.872586 -0.187941 1.498055 0.280727 0.185105 -0.176726 0 0
14 -1.007854 1.946086 0.162150 -0.040380 1.209596 -0.283561 2.703718 -0.479048 1.139185 -0.094942 -0.241802 -0.230658 0 0
15 -1.148500 -1.038639 1.466504 -0.903609 0.186581 -0.985592 -0.582354 2.493864 -0.658620 -0.409259 -1.142408 -0.002720 1 0
16 0.580515 0.408966 0.759067 -1.001060 0.697356 -1.027505 0.420872 -1.192638 -1.046819 1.428276 -1.213118 -0.620821 1 0
17 0.828864 -0.024616 1.708315 0.336834 2.119154 -0.083648 -0.260916 0.125194 -0.252140 0.315916 -0.554175 1.271464 0 0
18 -1.323765 -1.071100 -1.227033 -1.001060 -0.802220 -0.814440 -0.902727 -1.012315 -0.180743 1.428276 -1.211184 -1.197199 1 0
19 1.258380 -0.980344 -1.169455 -0.895307 0.784365 0.582611 -0.533001 0.319883 -1.027202 1.428276 -1.014103 0.265376 1 0
20 0.584754 2.257694 1.617268 2.780448 0.474336 0.004282 -0.015547 -1.203892 -1.203690 -1.381076 -1.170062 -0.610268 0 0
21 0.872730 2.337993 1.708314 -0.492713 -0.703382 -0.834409 0.298125 -0.068247 0.445576 1.226208 -1.003116 -0.229748 0 0
22 1.505233 -0.325266 0.239552 -0.911432 0.097951 -0.791099 -0.494947 1.831184 -0.367337 0.094547 -0.372461 1.678056 1 0
23 -1.341382 1.475366 1.708314 -0.430421 -0.137473 -0.924961 0.497402 -1.323459 -1.068884 1.412572 -1.166306 -1.088362 1 0
24 1.505233 -0.058685 1.279528 0.485516 -0.774672 -0.465361 -0.570279 -0.818106 -0.541573 -0.742412 -0.782598 0.830590 1 0
25 1.505233 -0.199351 1.568596 1.912238 -0.689720 -0.366603 -0.547507 0.350096 0.290752 -0.505127 0.813170 0.107592 0 0
26 1.084848 -0.023978 1.708314 2.367620 -0.203979 1.003510 0.238525 0.369125 0.271989 -0.249002 -0.148803 1.184233 0 0
27 0.552717 0.139915 0.547890 -0.898140 0.438535 -0.781040 0.355213 2.187005 -0.064105 0.877837 -0.435347 1.844563 1 0
28 -0.960017 -0.983960 -1.209972 -0.300931 -1.083476 -0.784556 0.184926 -0.959029 0.169592 -0.457636 0.416267 1.844563 1 0
29 -0.424404 1.136447 -0.827502 1.506323 -0.630661 0.988075 1.211921 -0.324148 1.498055 -0.626101 1.321091 1.289677 0 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
114 0.974362 -0.400418 0.642886 -0.973136 -0.705590 -0.422351 -0.641737 -0.124930 -0.610955 1.428276 -0.948151 -1.083631 1 1
115 0.284279 1.069626 0.177590 0.656479 0.265933 1.260127 0.211181 1.479561 1.498055 -0.348265 0.959183 -0.262467 0 1
116 0.401154 -0.702442 -1.068462 -0.829697 -0.787061 -0.712610 1.069084 -0.995528 -0.521572 1.092917 0.094596 1.844563 1 1
117 -0.392111 -0.441150 -0.850725 -0.730060 -0.590334 -0.810149 -0.085415 -0.255326 0.062980 1.428276 0.100370 1.127543 1 1
118 -1.167027 -0.128653 -0.576682 -0.857585 -0.875277 0.070585 2.703717 -1.315942 -1.242368 -0.230806 -0.934449 0.966999 1 1
119 -1.453231 -1.071100 -1.212986 -0.993891 -1.111719 0.192760 -0.824082 -0.543882 1.498055 -1.097875 -1.213118 -1.155045 1 1
120 -1.054909 -1.071100 0.809406 -1.001060 2.502299 1.963690 -0.902727 -1.387547 -1.268177 1.428276 -0.575983 -0.540965 1 1
121 -1.120738 -1.071100 -0.866114 -0.865353 2.514969 -1.021752 -0.889346 -1.354656 -1.264228 0.155874 -0.975986 -1.133896 1 1
122 -0.737469 -0.066958 1.708314 0.402373 0.024144 0.762426 -0.634542 1.630447 -0.974569 0.125146 0.555123 -0.717956 1 1
123 0.442153 -0.009588 1.301573 0.189897 0.126226 -0.685488 -0.235148 1.449126 -0.770793 1.428276 0.338088 0.077417 1 1
124 0.565085 -0.109187 1.708314 -0.270542 -0.174892 -0.875584 -0.663332 -0.616726 -1.041169 0.001534 -0.452279 -0.778143 1 1
125 1.007454 1.212228 1.708315 0.655560 0.997159 -0.014497 1.717990 0.884663 0.403666 0.774716 0.552503 1.696155 0 1
126 1.289118 0.354850 0.489878 0.491648 0.353546 -0.118255 0.937431 1.341005 1.033868 1.428276 0.501331 1.821758 0 1
127 0.028566 -0.686803 -0.979367 -0.791220 0.101396 -0.907919 -0.345653 -0.965689 -0.310560 0.141809 0.026242 1.844563 1 1
128 0.678311 -0.817132 1.446362 0.622042 0.636181 0.072008 -0.069805 -1.132198 -1.261564 -1.058991 -0.259138 1.844563 1 1
129 -0.163049 -0.571203 1.708314 0.661019 0.470865 0.046292 -0.374436 -0.995925 -1.290693 -1.171613 -0.486004 1.320617 1 1
130 -0.679135 -1.071100 1.708314 1.187633 -0.983778 0.166812 -0.902727 -0.804054 -1.328500 -1.408130 0.096574 -1.239435 1 1
131 -0.204732 -1.050977 0.143360 -1.000482 -1.121646 -0.314462 -0.902727 0.206977 0.418859 1.428276 1.018850 -0.993493 1 1
132 -0.966699 -1.071100 1.467246 0.266055 -1.121646 -0.920149 -0.902727 0.092987 0.417098 1.428276 1.104800 -1.136694 1 1
133 0.013654 2.406593 -0.420557 -0.020562 0.100086 -0.987233 -0.324993 -1.008730 0.790985 1.379950 0.681674 0.248593 0 1
134 0.770196 2.406593 -0.756682 -0.547320 -0.185781 -0.026969 -0.879742 -1.399022 -0.329956 0.189407 0.180515 1.255243 0 1
135 0.316774 1.171862 0.453349 0.986569 0.390405 -0.205625 0.010260 0.507765 1.182053 1.363422 1.911782 1.166586 0 1
136 0.384484 0.519829 -0.388746 0.502793 -0.676553 1.739251 -0.282937 -0.553546 0.908221 0.045664 1.911782 -0.766194 0 1
137 -1.084993 0.429049 -0.688647 -0.802982 -0.789349 -0.553963 0.156609 1.547841 1.498055 0.119488 1.636521 -0.946370 1 1
138 0.980726 1.839717 1.338253 1.957363 2.514969 0.934743 -0.841064 -1.071315 -1.328500 -1.401581 -1.188112 -1.233483 0 1
139 -1.071701 -1.071012 -0.961961 -0.905451 2.339631 0.389698 0.308230 0.057114 -0.497016 0.228955 -0.961941 1.844563 1 1
140 -0.415164 -1.071100 -1.221501 -0.693006 2.514969 -1.009569 1.155568 -0.071769 -1.197183 -1.374479 -1.213118 0.168259 1 1
141 -1.200546 -1.040557 0.160549 -0.991650 -0.803943 -0.827445 0.797509 1.475607 0.141845 1.428276 -0.383061 0.771996 1 1
142 1.505233 0.201458 0.615256 -0.813296 -0.041980 1.737065 -0.019083 0.530116 0.175767 1.099664 -0.498466 0.042633 0 1
143 -0.236120 -0.146675 -0.110274 2.342766 0.998147 2.465626 -0.067593 -0.182221 -0.463531 -0.779670 0.565948 -0.276677 0 1

144 rows × 14 columns

In [498]:
# Contingency counts of (chosen, Cluster), shown as a stacked bar chart
# per cluster to see how well clusters line up with the chosen flag.
counts = X.groupby(['chosen', 'Cluster']).size().reset_index()
by_cluster = counts.pivot(index='Cluster', columns='chosen', values=0)
by_cluster.loc[:, [0, 1]].plot.bar(stacked=True, figsize=(10, 7))
Out[498]:
<matplotlib.axes._subplots.AxesSubplot at 0x1b831756ac8>
In [499]:
# NOTE(review): mid-notebook import — better collected in the notebook's first cell.
from IPython.display import display, Markdown, Latex
# Render the company name (index 4) as a level-2 markdown section header.
display(Markdown('## '+companies[4]))

Specialized

ANN (Artificial Neural Network)

In [25]:
# Standardized chromagram features for company index 4 ("Specialized").
# NOTE(review): assumes df_n_ps_std_ch holds one pre-standardized frame per company — confirm upstream.
X = df_n_ps_std_ch[4]
In [26]:
# Binary target: whether the track was chosen (0/1).
y = df_n_ps[4]['chosen']
In [27]:
# Hold out a test set. random_state pins the split so the notebook is
# reproducible on Restart & Run All (matching random_state=0 used for KMeans
# above); stratify keeps the class balance identical in train and test.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, stratify=y)
In [28]:
# Sanity check: training-set size and feature count.
X_train.shape
Out[28]:
(164, 12)
In [29]:
# Base estimator for the grid search; this hidden_layer_sizes value is a
# placeholder — the 'hidden_layer_sizes' grid entry below overrides it.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [30]:
# Hyperparameter search space for the MLP grid search below.
activation_vec = ['logistic', 'relu', 'tanh']  # candidate activation functions
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]  # training iteration budgets
# Candidate architectures: 1 to 3 hidden layers of 10-30 units.
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]  # initial learning rates
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]  # defined but excluded from the grid (see parametros below)
In [31]:
# NOTE(review): mid-notebook import — better collected in the notebook's first cell.
import time
start = time.time() # Current time in seconds since Jan 1, 1970 (reference point) — used to time the grid search
 
# Seed numpy's global RNG so the search is repeatable.
np.random.seed(1234)
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track both Cohen's kappa and accuracy; refit the best model on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): `iid` was deprecated in sklearn 0.22 and removed in 0.24 —
# this cell presumably ran on an older sklearn; confirm before re-running.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [507]:
# Exhaustive grid search over the parameter space above (~24 minutes per the log).
grid.fit(X_train, y_train)

# Report the winning parameters with their accuracy and kappa scores.
print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # Time after model training finished
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'logistic', 'hidden_layer_sizes': (20,), 'learning_rate_init': 0.004, 'max_iter': 2000}, que permiten obtener un Accuracy de 73.78% y un Kappa del 46.44
Tiempo total: 24.78 minutos
In [32]:
# Re-state the winning hyper-parameters by hand so this cell runs without
# repeating the ~24-minute grid search. sklearn's 'logistic' activation is
# spelled 'sigmoid' in Keras, hence the renamed value.
grid.best_params_={'activation': 'sigmoid', 'hidden_layer_sizes': (20,), 'learning_rate_init': 0.004, 'max_iter': 2000}

n0 = X_train.shape[1]  # input dimension (number of features)

# Hidden-layer widths from the search, plus a single-unit output layer.
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)

lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [33]:
# Keras functional-API input layer; n0 = number of features (12 per X_train.shape).
input_tensor = Input(shape = (n0,))
In [34]:
# Stack the hidden layers: each Dense layer consumes the previous layer's output.
hidden_outputs = [input_tensor]
for width in ns[:-1]:
    layer = Dense(width, activation=grid.best_params_['activation'])
    hidden_outputs.append(layer(hidden_outputs[-1]))

# Single-unit sigmoid head for binary classification (ns[-1] == 1).
classification_output = Dense(ns[-1], activation='sigmoid')(hidden_outputs[-1])
In [35]:
# Assemble the functional model and snapshot its freshly-initialized weights
# so training can later be restarted from the same starting point.
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [36]:
# Architecture overview: 12 -> 20 -> 1, 281 trainable parameters (see output below).
model.summary()
Model: "model_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_1 (InputLayer)         (None, 12)                0         
_________________________________________________________________
dense_1 (Dense)              (None, 20)                260       
_________________________________________________________________
dense_2 (Dense)              (None, 1)                 21        
=================================================================
Total params: 281
Trainable params: 281
Non-trainable params: 0
_________________________________________________________________
In [37]:
# Restore the initial weights captured right after model creation, so this
# cell always trains from the same starting point when re-run.
model.set_weights(weights)
adam = keras.optimizers.Adam(lr=lr)  # lr = 0.004 from the grid search; `lr` is the legacy Keras arg name
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Train for `epochs` (2000) epochs, halving the learning rate whenever
# validation accuracy fails to improve by >= 0.01 for 10 consecutive epochs.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), 
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
Train on 164 samples, validate on 55 samples
Epoch 1/2000
164/164 [==============================] - 1s 4ms/step - loss: 0.7436 - accuracy: 0.4146 - val_loss: 0.6989 - val_accuracy: 0.4545
Epoch 2/2000
164/164 [==============================] - 0s 98us/step - loss: 0.7193 - accuracy: 0.4573 - val_loss: 0.6965 - val_accuracy: 0.5091
Epoch 3/2000
164/164 [==============================] - 0s 98us/step - loss: 0.7058 - accuracy: 0.4573 - val_loss: 0.6952 - val_accuracy: 0.4909
Epoch 4/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6958 - accuracy: 0.5244 - val_loss: 0.6950 - val_accuracy: 0.4727
Epoch 5/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6861 - accuracy: 0.5427 - val_loss: 0.6954 - val_accuracy: 0.5273
Epoch 6/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6797 - accuracy: 0.5732 - val_loss: 0.6963 - val_accuracy: 0.5091
Epoch 7/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6731 - accuracy: 0.5854 - val_loss: 0.6976 - val_accuracy: 0.4909
Epoch 8/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6677 - accuracy: 0.6098 - val_loss: 0.7010 - val_accuracy: 0.4909
Epoch 9/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6643 - accuracy: 0.6098 - val_loss: 0.7051 - val_accuracy: 0.5091
Epoch 10/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6619 - accuracy: 0.6220 - val_loss: 0.7080 - val_accuracy: 0.5091
Epoch 11/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6589 - accuracy: 0.6280 - val_loss: 0.7086 - val_accuracy: 0.5091
Epoch 12/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6580 - accuracy: 0.6220 - val_loss: 0.7083 - val_accuracy: 0.5273
Epoch 13/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6557 - accuracy: 0.6159 - val_loss: 0.7093 - val_accuracy: 0.5273
Epoch 14/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6546 - accuracy: 0.6402 - val_loss: 0.7106 - val_accuracy: 0.5273
Epoch 15/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6520 - accuracy: 0.6402 - val_loss: 0.7130 - val_accuracy: 0.5455
Epoch 16/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6498 - accuracy: 0.6524 - val_loss: 0.7159 - val_accuracy: 0.5818
Epoch 17/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6474 - accuracy: 0.6585 - val_loss: 0.7181 - val_accuracy: 0.5818
Epoch 18/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6452 - accuracy: 0.6524 - val_loss: 0.7194 - val_accuracy: 0.5818
Epoch 19/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6436 - accuracy: 0.6585 - val_loss: 0.7203 - val_accuracy: 0.5818
Epoch 20/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6416 - accuracy: 0.6524 - val_loss: 0.7199 - val_accuracy: 0.5636
Epoch 21/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6393 - accuracy: 0.6463 - val_loss: 0.7179 - val_accuracy: 0.5455
Epoch 22/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6373 - accuracy: 0.6463 - val_loss: 0.7153 - val_accuracy: 0.5091
Epoch 23/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6365 - accuracy: 0.6768 - val_loss: 0.7155 - val_accuracy: 0.5091
Epoch 24/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6361 - accuracy: 0.6707 - val_loss: 0.7169 - val_accuracy: 0.5091
Epoch 25/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6344 - accuracy: 0.6707 - val_loss: 0.7197 - val_accuracy: 0.5455
Epoch 26/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6324 - accuracy: 0.6585 - val_loss: 0.7238 - val_accuracy: 0.5091

Epoch 00026: ReduceLROnPlateau reducing learning rate to 0.0020000000949949026.
Epoch 27/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6302 - accuracy: 0.6341 - val_loss: 0.7254 - val_accuracy: 0.4909
Epoch 28/2000
164/164 [==============================] - ETA: 0s - loss: 0.6634 - accuracy: 0.53 - 0s 91us/step - loss: 0.6304 - accuracy: 0.6402 - val_loss: 0.7253 - val_accuracy: 0.4909
Epoch 29/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6292 - accuracy: 0.6402 - val_loss: 0.7252 - val_accuracy: 0.5091
Epoch 30/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6283 - accuracy: 0.6524 - val_loss: 0.7253 - val_accuracy: 0.5091
Epoch 31/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6280 - accuracy: 0.6585 - val_loss: 0.7244 - val_accuracy: 0.5273
Epoch 32/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6274 - accuracy: 0.6646 - val_loss: 0.7236 - val_accuracy: 0.5273
Epoch 33/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6268 - accuracy: 0.6646 - val_loss: 0.7238 - val_accuracy: 0.5091
Epoch 34/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6262 - accuracy: 0.6585 - val_loss: 0.7240 - val_accuracy: 0.5091
Epoch 35/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6256 - accuracy: 0.6646 - val_loss: 0.7250 - val_accuracy: 0.5273
Epoch 36/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6253 - accuracy: 0.6585 - val_loss: 0.7253 - val_accuracy: 0.5273

Epoch 00036: ReduceLROnPlateau reducing learning rate to 0.0010000000474974513.
Epoch 37/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6248 - accuracy: 0.6524 - val_loss: 0.7263 - val_accuracy: 0.5273
Epoch 38/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6245 - accuracy: 0.6524 - val_loss: 0.7281 - val_accuracy: 0.5455
Epoch 39/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6242 - accuracy: 0.6524 - val_loss: 0.7290 - val_accuracy: 0.5455
Epoch 40/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6241 - accuracy: 0.6646 - val_loss: 0.7298 - val_accuracy: 0.5636
Epoch 41/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6240 - accuracy: 0.6646 - val_loss: 0.7304 - val_accuracy: 0.5636
Epoch 42/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6239 - accuracy: 0.6646 - val_loss: 0.7306 - val_accuracy: 0.5455
Epoch 43/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6244 - accuracy: 0.6768 - val_loss: 0.7313 - val_accuracy: 0.5455
Epoch 44/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6241 - accuracy: 0.6768 - val_loss: 0.7311 - val_accuracy: 0.5455
Epoch 45/2000
164/164 [==============================] - 0s 152us/step - loss: 0.6239 - accuracy: 0.6768 - val_loss: 0.7308 - val_accuracy: 0.5455
Epoch 46/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6233 - accuracy: 0.6646 - val_loss: 0.7314 - val_accuracy: 0.5273

Epoch 00046: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.
Epoch 47/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6225 - accuracy: 0.6646 - val_loss: 0.7317 - val_accuracy: 0.5273
Epoch 48/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6223 - accuracy: 0.6585 - val_loss: 0.7320 - val_accuracy: 0.5273
Epoch 49/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6222 - accuracy: 0.6524 - val_loss: 0.7322 - val_accuracy: 0.5455
Epoch 50/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6218 - accuracy: 0.6463 - val_loss: 0.7323 - val_accuracy: 0.5455
Epoch 51/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6216 - accuracy: 0.6524 - val_loss: 0.7328 - val_accuracy: 0.5273
Epoch 52/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6214 - accuracy: 0.6524 - val_loss: 0.7332 - val_accuracy: 0.5273
Epoch 53/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6212 - accuracy: 0.6524 - val_loss: 0.7337 - val_accuracy: 0.5273
Epoch 54/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6211 - accuracy: 0.6524 - val_loss: 0.7340 - val_accuracy: 0.5455
Epoch 55/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6211 - accuracy: 0.6585 - val_loss: 0.7340 - val_accuracy: 0.5455
Epoch 56/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6210 - accuracy: 0.6524 - val_loss: 0.7341 - val_accuracy: 0.5455

Epoch 00056: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
Epoch 57/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6209 - accuracy: 0.6524 - val_loss: 0.7342 - val_accuracy: 0.5455
Epoch 58/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6209 - accuracy: 0.6524 - val_loss: 0.7342 - val_accuracy: 0.5455
Epoch 59/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6208 - accuracy: 0.6585 - val_loss: 0.7343 - val_accuracy: 0.5273
Epoch 60/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6207 - accuracy: 0.6646 - val_loss: 0.7342 - val_accuracy: 0.5273
Epoch 61/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6206 - accuracy: 0.6585 - val_loss: 0.7340 - val_accuracy: 0.5273
Epoch 62/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6205 - accuracy: 0.6585 - val_loss: 0.7340 - val_accuracy: 0.5273
Epoch 63/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6205 - accuracy: 0.6585 - val_loss: 0.7341 - val_accuracy: 0.5273
Epoch 64/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6204 - accuracy: 0.6585 - val_loss: 0.7341 - val_accuracy: 0.5273
Epoch 65/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6203 - accuracy: 0.6524 - val_loss: 0.7341 - val_accuracy: 0.5273
Epoch 66/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6202 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00066: ReduceLROnPlateau reducing learning rate to 0.0001250000059371814.
Epoch 67/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6201 - accuracy: 0.6524 - val_loss: 0.7338 - val_accuracy: 0.5273
Epoch 68/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6201 - accuracy: 0.6524 - val_loss: 0.7337 - val_accuracy: 0.5273
Epoch 69/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6200 - accuracy: 0.6463 - val_loss: 0.7338 - val_accuracy: 0.5273
Epoch 70/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6200 - accuracy: 0.6524 - val_loss: 0.7337 - val_accuracy: 0.5273
Epoch 71/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6199 - accuracy: 0.6463 - val_loss: 0.7337 - val_accuracy: 0.5273
Epoch 72/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6200 - accuracy: 0.6463 - val_loss: 0.7337 - val_accuracy: 0.5273
Epoch 73/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6199 - accuracy: 0.6463 - val_loss: 0.7337 - val_accuracy: 0.5273
Epoch 74/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6198 - accuracy: 0.6463 - val_loss: 0.7336 - val_accuracy: 0.5273
Epoch 75/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6198 - accuracy: 0.6524 - val_loss: 0.7335 - val_accuracy: 0.5273
Epoch 76/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6198 - accuracy: 0.6463 - val_loss: 0.7335 - val_accuracy: 0.5273

Epoch 00076: ReduceLROnPlateau reducing learning rate to 6.25000029685907e-05.
Epoch 77/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6198 - accuracy: 0.6463 - val_loss: 0.7336 - val_accuracy: 0.5273
Epoch 78/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6198 - accuracy: 0.6524 - val_loss: 0.7336 - val_accuracy: 0.5273
Epoch 79/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6197 - accuracy: 0.6463 - val_loss: 0.7336 - val_accuracy: 0.5273
Epoch 80/2000
164/164 [==============================] - 0s 140us/step - loss: 0.6197 - accuracy: 0.6463 - val_loss: 0.7336 - val_accuracy: 0.5273
Epoch 81/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6197 - accuracy: 0.6524 - val_loss: 0.7336 - val_accuracy: 0.5273
Epoch 82/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6197 - accuracy: 0.6524 - val_loss: 0.7336 - val_accuracy: 0.5273
Epoch 83/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6197 - accuracy: 0.6524 - val_loss: 0.7336 - val_accuracy: 0.5273
Epoch 84/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6197 - accuracy: 0.6524 - val_loss: 0.7336 - val_accuracy: 0.5273
Epoch 85/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6197 - accuracy: 0.6524 - val_loss: 0.7336 - val_accuracy: 0.5273
Epoch 86/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6196 - accuracy: 0.6524 - val_loss: 0.7337 - val_accuracy: 0.5273

Epoch 00086: ReduceLROnPlateau reducing learning rate to 3.125000148429535e-05.
Epoch 87/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6196 - accuracy: 0.6524 - val_loss: 0.7337 - val_accuracy: 0.5273
Epoch 88/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6196 - accuracy: 0.6524 - val_loss: 0.7337 - val_accuracy: 0.5273
Epoch 89/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6196 - accuracy: 0.6524 - val_loss: 0.7337 - val_accuracy: 0.5273
Epoch 90/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6196 - accuracy: 0.6524 - val_loss: 0.7337 - val_accuracy: 0.5273
Epoch 91/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6196 - accuracy: 0.6524 - val_loss: 0.7337 - val_accuracy: 0.5273
Epoch 92/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6196 - accuracy: 0.6524 - val_loss: 0.7337 - val_accuracy: 0.5273
Epoch 93/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6196 - accuracy: 0.6524 - val_loss: 0.7338 - val_accuracy: 0.5273
Epoch 94/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6196 - accuracy: 0.6524 - val_loss: 0.7338 - val_accuracy: 0.5273
Epoch 95/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6196 - accuracy: 0.6524 - val_loss: 0.7338 - val_accuracy: 0.5273
Epoch 96/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6196 - accuracy: 0.6524 - val_loss: 0.7338 - val_accuracy: 0.5273

Epoch 00096: ReduceLROnPlateau reducing learning rate to 1.5625000742147677e-05.
Epoch 97/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6196 - accuracy: 0.6524 - val_loss: 0.7338 - val_accuracy: 0.5273
Epoch 98/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6196 - accuracy: 0.6524 - val_loss: 0.7338 - val_accuracy: 0.5273
Epoch 99/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6196 - accuracy: 0.6524 - val_loss: 0.7338 - val_accuracy: 0.5273
Epoch 100/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6196 - accuracy: 0.6524 - val_loss: 0.7338 - val_accuracy: 0.5273
Epoch 101/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6196 - accuracy: 0.6524 - val_loss: 0.7338 - val_accuracy: 0.5273
Epoch 102/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7338 - val_accuracy: 0.5273
Epoch 103/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7338 - val_accuracy: 0.5273
Epoch 104/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7338 - val_accuracy: 0.5273
Epoch 105/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7338 - val_accuracy: 0.5273
Epoch 106/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7338 - val_accuracy: 0.5273

Epoch 00106: ReduceLROnPlateau reducing learning rate to 7.812500371073838e-06.
Epoch 107/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7338 - val_accuracy: 0.5273
Epoch 108/2000
164/164 [==============================] - 0s 140us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7338 - val_accuracy: 0.5273
Epoch 109/2000
164/164 [==============================] - 0s 140us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7338 - val_accuracy: 0.5273
Epoch 110/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7338 - val_accuracy: 0.5273
Epoch 111/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7338 - val_accuracy: 0.5273
Epoch 112/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7338 - val_accuracy: 0.5273
Epoch 113/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 114/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 115/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 116/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00116: ReduceLROnPlateau reducing learning rate to 3.906250185536919e-06.
Epoch 117/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 118/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 119/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 120/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 121/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 122/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 123/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 124/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 125/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 126/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00126: ReduceLROnPlateau reducing learning rate to 1.9531250927684596e-06.
Epoch 127/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 128/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 129/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 130/2000
164/164 [==============================] - 0s 140us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 131/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 132/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 133/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 134/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 135/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 136/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00136: ReduceLROnPlateau reducing learning rate to 9.765625463842298e-07.
Epoch 137/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6585 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 138/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 139/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 140/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 141/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 142/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 143/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 144/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 145/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 146/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00146: ReduceLROnPlateau reducing learning rate to 4.882812731921149e-07.
Epoch 147/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 148/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 149/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 150/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 151/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 152/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 153/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 154/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 155/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 156/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00156: ReduceLROnPlateau reducing learning rate to 2.4414063659605745e-07.
Epoch 157/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 158/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 159/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 160/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 161/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 162/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 163/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 164/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 165/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 166/2000
164/164 [==============================] - ETA: 0s - loss: 0.6557 - accuracy: 0.62 - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00166: ReduceLROnPlateau reducing learning rate to 1.2207031829802872e-07.
Epoch 167/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 168/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 169/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 170/2000
164/164 [==============================] - 0s 152us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 171/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 172/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 173/2000
164/164 [==============================] - 0s 140us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 174/2000
164/164 [==============================] - 0s 146us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 175/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 176/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00176: ReduceLROnPlateau reducing learning rate to 6.103515914901436e-08.
Epoch 177/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 178/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 179/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 180/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 181/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 182/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 183/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 184/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 185/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 186/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00186: ReduceLROnPlateau reducing learning rate to 3.051757957450718e-08.
Epoch 187/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 188/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 189/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 190/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 191/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 192/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 193/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 194/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 195/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 196/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00196: ReduceLROnPlateau reducing learning rate to 1.525878978725359e-08.
Epoch 197/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 198/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 199/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 200/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 201/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 202/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 203/2000
164/164 [==============================] - 0s 146us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 204/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 205/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 206/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00206: ReduceLROnPlateau reducing learning rate to 7.629394893626795e-09.
Epoch 207/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 208/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 209/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 210/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 211/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 212/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 213/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 214/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 215/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 216/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00216: ReduceLROnPlateau reducing learning rate to 3.814697446813398e-09.
Epoch 217/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 218/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 219/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 220/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 221/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 222/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 223/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 224/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 225/2000
164/164 [==============================] - 0s 140us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 226/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00226: ReduceLROnPlateau reducing learning rate to 1.907348723406699e-09.
Epoch 227/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 228/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 229/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 230/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 231/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 232/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 233/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 234/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 235/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 236/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00236: ReduceLROnPlateau reducing learning rate to 9.536743617033494e-10.
Epoch 237/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 238/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 239/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 240/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 241/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 242/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 243/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 244/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 245/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 246/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00246: ReduceLROnPlateau reducing learning rate to 4.768371808516747e-10.
Epoch 247/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 248/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 249/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 250/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 251/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 252/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 253/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 254/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 255/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 256/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00256: ReduceLROnPlateau reducing learning rate to 2.3841859042583735e-10.
Epoch 257/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 258/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 259/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 260/2000
164/164 [==============================] - 0s 152us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 261/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 262/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 263/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 264/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 265/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 266/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00266: ReduceLROnPlateau reducing learning rate to 1.1920929521291868e-10.
Epoch 267/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 268/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 269/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 270/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 271/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 272/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 273/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 274/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 275/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 276/2000
164/164 [==============================] - ETA: 0s - loss: 0.6168 - accuracy: 0.71 - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00276: ReduceLROnPlateau reducing learning rate to 5.960464760645934e-11.
Epoch 277/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 278/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 279/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 280/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 281/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 282/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 283/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 284/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 285/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 286/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00286: ReduceLROnPlateau reducing learning rate to 2.980232380322967e-11.
Epoch 287/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 288/2000
164/164 [==============================] - 0s 146us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 289/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 290/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 291/2000
164/164 [==============================] - ETA: 0s - loss: 0.6812 - accuracy: 0.53 - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 292/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 293/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 294/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 295/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 296/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00296: ReduceLROnPlateau reducing learning rate to 1.4901161901614834e-11.
Epoch 297/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 298/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 299/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 300/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 301/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 302/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 303/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 304/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 305/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 306/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00306: ReduceLROnPlateau reducing learning rate to 7.450580950807417e-12.
Epoch 307/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 308/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 309/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 310/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 311/2000
164/164 [==============================] - 0s 329us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 312/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 313/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 314/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 315/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 316/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00316: ReduceLROnPlateau reducing learning rate to 3.725290475403709e-12.
Epoch 317/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 318/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 319/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 320/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 321/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 322/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 323/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 324/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 325/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 326/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00326: ReduceLROnPlateau reducing learning rate to 1.8626452377018543e-12.
Epoch 327/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 328/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 329/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 330/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 331/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 332/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 333/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 334/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 335/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 336/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00336: ReduceLROnPlateau reducing learning rate to 9.313226188509272e-13.
Epoch 337/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 338/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 339/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 340/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 341/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 342/2000
164/164 [==============================] - 0s 152us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 343/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 344/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 345/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 346/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00346: ReduceLROnPlateau reducing learning rate to 4.656613094254636e-13.
Epoch 347/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 348/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 349/2000
164/164 [==============================] - 0s 79us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 350/2000
164/164 [==============================] - 0s 140us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 351/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 352/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 353/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 354/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 355/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 356/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00356: ReduceLROnPlateau reducing learning rate to 2.328306547127318e-13.
Epoch 357/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 358/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 359/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 360/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 361/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 362/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 363/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 364/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 365/2000
164/164 [==============================] - 0s 79us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 366/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00366: ReduceLROnPlateau reducing learning rate to 1.164153273563659e-13.
Epoch 367/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 368/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 369/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 370/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 371/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 372/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 373/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 374/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 375/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 376/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00376: ReduceLROnPlateau reducing learning rate to 5.820766367818295e-14.
Epoch 377/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 378/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 379/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 380/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 381/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 382/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 383/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 384/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 385/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 386/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00386: ReduceLROnPlateau reducing learning rate to 2.9103831839091474e-14.
Epoch 387/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 388/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 389/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 390/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 391/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 392/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 393/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 394/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 395/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 396/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00396: ReduceLROnPlateau reducing learning rate to 1.4551915919545737e-14.
Epoch 397/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 398/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 399/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 400/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 401/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 402/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 403/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 404/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 405/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 406/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00406: ReduceLROnPlateau reducing learning rate to 7.275957959772868e-15.
Epoch 407/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 408/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 409/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 410/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 411/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 412/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 413/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 414/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 415/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 416/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00416: ReduceLROnPlateau reducing learning rate to 3.637978979886434e-15.
Epoch 417/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 418/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 419/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 420/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 421/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 422/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 423/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 424/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 425/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 426/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00426: ReduceLROnPlateau reducing learning rate to 1.818989489943217e-15.
Epoch 427/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 428/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 429/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 430/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 431/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 432/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 433/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 434/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 435/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 436/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00436: ReduceLROnPlateau reducing learning rate to 9.094947449716085e-16.
Epoch 437/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 438/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 439/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 440/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 441/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 442/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 443/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 444/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 445/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 446/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00446: ReduceLROnPlateau reducing learning rate to 4.547473724858043e-16.
Epoch 447/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 448/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 449/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 450/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 451/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 452/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 453/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 454/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 455/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 456/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00456: ReduceLROnPlateau reducing learning rate to 2.2737368624290214e-16.
Epoch 457/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 458/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 459/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 460/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 461/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 462/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 463/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 464/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 465/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 466/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00466: ReduceLROnPlateau reducing learning rate to 1.1368684312145107e-16.
Epoch 467/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 468/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 469/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 470/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 471/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 472/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 473/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 474/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 475/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 476/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00476: ReduceLROnPlateau reducing learning rate to 5.684342156072553e-17.
Epoch 477/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 478/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 479/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 480/2000
164/164 [==============================] - ETA: 0s - loss: 0.5786 - accuracy: 0.71 - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 481/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 482/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 483/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 484/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 485/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 486/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00486: ReduceLROnPlateau reducing learning rate to 2.842171078036277e-17.
Epoch 487/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 488/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 489/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 490/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 491/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 492/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 493/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 494/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 495/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 496/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00496: ReduceLROnPlateau reducing learning rate to 1.4210855390181384e-17.
Epoch 497/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 498/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 499/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 500/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 501/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 502/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 503/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 504/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 505/2000
164/164 [==============================] - 0s 140us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 506/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00506: ReduceLROnPlateau reducing learning rate to 7.105427695090692e-18.
Epoch 507/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 508/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 509/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 510/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 511/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 512/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 513/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 514/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 515/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 516/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00516: ReduceLROnPlateau reducing learning rate to 3.552713847545346e-18.
Epoch 517/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 518/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 519/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 520/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 521/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 522/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 523/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 524/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 525/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 526/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00526: ReduceLROnPlateau reducing learning rate to 1.776356923772673e-18.
Epoch 527/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 528/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 529/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 530/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 531/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 532/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 533/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 534/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 535/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 536/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00536: ReduceLROnPlateau reducing learning rate to 8.881784618863365e-19.
Epoch 537/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 538/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 539/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 540/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 541/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 542/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 543/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 544/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 545/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 546/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00546: ReduceLROnPlateau reducing learning rate to 4.440892309431682e-19.
Epoch 547/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 548/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 549/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 550/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 551/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 552/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 553/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 554/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 555/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 556/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00556: ReduceLROnPlateau reducing learning rate to 2.220446154715841e-19.
Epoch 557/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 558/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 559/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 560/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 561/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 562/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 563/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 564/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 565/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 566/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00566: ReduceLROnPlateau reducing learning rate to 1.1102230773579206e-19.
Epoch 567/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 568/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 569/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 570/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 571/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 572/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 573/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 574/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 575/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 576/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00576: ReduceLROnPlateau reducing learning rate to 5.551115386789603e-20.
Epoch 577/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 578/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 579/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 580/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 581/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 582/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 583/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 584/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 585/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 586/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00586: ReduceLROnPlateau reducing learning rate to 2.7755576933948015e-20.
Epoch 587/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 588/2000
164/164 [==============================] - 0s 146us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 589/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 590/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 591/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 592/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 593/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 594/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 595/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 596/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00596: ReduceLROnPlateau reducing learning rate to 1.3877788466974007e-20.
Epoch 597/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 598/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 599/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 600/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 601/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 602/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 603/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 604/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 605/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 606/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00606: ReduceLROnPlateau reducing learning rate to 6.938894233487004e-21.
Epoch 607/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 608/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 609/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 610/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 611/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 612/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 613/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 614/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 615/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 616/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00616: ReduceLROnPlateau reducing learning rate to 3.469447116743502e-21.
Epoch 617/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 618/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 619/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 620/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 621/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 622/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 623/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 624/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 625/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 626/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00626: ReduceLROnPlateau reducing learning rate to 1.734723558371751e-21.
Epoch 627/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 628/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 629/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 630/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 631/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 632/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 633/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 634/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 635/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 636/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00636: ReduceLROnPlateau reducing learning rate to 8.673617791858755e-22.
Epoch 637/2000
164/164 [==============================] - 0s 92us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 638/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 639/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 640/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 641/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 642/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 643/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 644/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 645/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 646/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00646: ReduceLROnPlateau reducing learning rate to 4.336808895929377e-22.
Epoch 647/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 648/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 649/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 650/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 651/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 652/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 653/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 654/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 655/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 656/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00656: ReduceLROnPlateau reducing learning rate to 2.1684044479646887e-22.
Epoch 657/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 658/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 659/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 660/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 661/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 662/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 663/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 664/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 665/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 666/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00666: ReduceLROnPlateau reducing learning rate to 1.0842022239823443e-22.
Epoch 667/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 668/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 669/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 670/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 671/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 672/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 673/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 674/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 675/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 676/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00676: ReduceLROnPlateau reducing learning rate to 5.421011119911722e-23.
Epoch 677/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 678/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 679/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 680/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 681/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 682/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 683/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 684/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 685/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 686/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00686: ReduceLROnPlateau reducing learning rate to 2.710505559955861e-23.
Epoch 687/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 688/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 689/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 690/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 691/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 692/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 693/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 694/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 695/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 696/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00696: ReduceLROnPlateau reducing learning rate to 1.3552527799779304e-23.
Epoch 697/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 698/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 699/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 700/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 701/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 702/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 703/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 704/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 705/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 706/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00706: ReduceLROnPlateau reducing learning rate to 6.776263899889652e-24.
Epoch 707/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 708/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 709/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 710/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 711/2000
164/164 [==============================] - ETA: 0s - loss: 0.5574 - accuracy: 0.68 - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 712/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 713/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 714/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 715/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 716/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00716: ReduceLROnPlateau reducing learning rate to 3.388131949944826e-24.
Epoch 717/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 718/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 719/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 720/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 721/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 722/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 723/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 724/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 725/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 726/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00726: ReduceLROnPlateau reducing learning rate to 1.694065974972413e-24.
Epoch 727/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 728/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 729/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 730/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 731/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 732/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 733/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 734/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 735/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 736/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00736: ReduceLROnPlateau reducing learning rate to 8.470329874862065e-25.
Epoch 737/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 738/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 739/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 740/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 741/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 742/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 743/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 744/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 745/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 746/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00746: ReduceLROnPlateau reducing learning rate to 4.2351649374310325e-25.
Epoch 747/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 748/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 749/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 750/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 751/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 752/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 753/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 754/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 755/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 756/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00756: ReduceLROnPlateau reducing learning rate to 2.1175824687155163e-25.
Epoch 757/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 758/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 759/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 760/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 761/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 762/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 763/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 764/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 765/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 766/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00766: ReduceLROnPlateau reducing learning rate to 1.0587912343577581e-25.
Epoch 767/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 768/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 769/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 770/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 771/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 772/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 773/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 774/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 775/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 776/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00776: ReduceLROnPlateau reducing learning rate to 5.293956171788791e-26.
Epoch 777/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 778/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 779/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 780/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 781/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 782/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 783/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 784/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 785/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 786/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00786: ReduceLROnPlateau reducing learning rate to 2.6469780858943953e-26.
Epoch 787/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 788/2000
164/164 [==============================] - 0s 140us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 789/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 790/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 791/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 792/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 793/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 794/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 795/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 796/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00796: ReduceLROnPlateau reducing learning rate to 1.3234890429471977e-26.
Epoch 797/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 798/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 799/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 800/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 801/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 802/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 803/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 804/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 805/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 806/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00806: ReduceLROnPlateau reducing learning rate to 6.617445214735988e-27.
Epoch 807/2000
164/164 [==============================] - 0s 79us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 808/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 809/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 810/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 811/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 812/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 813/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 814/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 815/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 816/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00816: ReduceLROnPlateau reducing learning rate to 3.308722607367994e-27.
Epoch 817/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 818/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 819/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 820/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 821/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 822/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 823/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 824/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 825/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 826/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00826: ReduceLROnPlateau reducing learning rate to 1.654361303683997e-27.
Epoch 827/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 828/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 829/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 830/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 831/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 832/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 833/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 834/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 835/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 836/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00836: ReduceLROnPlateau reducing learning rate to 8.271806518419985e-28.
Epoch 837/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 838/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 839/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 840/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 841/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 842/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 843/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 844/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 845/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 846/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00846: ReduceLROnPlateau reducing learning rate to 4.135903259209993e-28.
Epoch 847/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 848/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 849/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 850/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 851/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 852/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 853/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 854/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 855/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 856/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00856: ReduceLROnPlateau reducing learning rate to 2.0679516296049964e-28.
Epoch 857/2000
164/164 [==============================] - ETA: 0s - loss: 0.6092 - accuracy: 0.68 - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 858/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 859/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 860/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 861/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 862/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 863/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 864/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 865/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 866/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00866: ReduceLROnPlateau reducing learning rate to 1.0339758148024982e-28.
Epoch 867/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 868/2000
164/164 [==============================] - 0s 171us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 869/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 870/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 871/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 872/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 873/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 874/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 875/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 876/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00876: ReduceLROnPlateau reducing learning rate to 5.169879074012491e-29.
Epoch 877/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 878/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 879/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 880/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 881/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 882/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 883/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 884/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 885/2000
164/164 [==============================] - 0s 146us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 886/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00886: ReduceLROnPlateau reducing learning rate to 2.5849395370062454e-29.
Epoch 887/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 888/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 889/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 890/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 891/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 892/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 893/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 894/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 895/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 896/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00896: ReduceLROnPlateau reducing learning rate to 1.2924697685031227e-29.
Epoch 897/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 898/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 899/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 900/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 901/2000
164/164 [==============================] - 0s 146us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 902/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 903/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 904/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 905/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 906/2000
164/164 [==============================] - 0s 140us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00906: ReduceLROnPlateau reducing learning rate to 6.462348842515614e-30.
Epoch 907/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 908/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 909/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 910/2000
164/164 [==============================] - 0s 146us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 911/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 912/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 913/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 914/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 915/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 916/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00916: ReduceLROnPlateau reducing learning rate to 3.231174421257807e-30.
Epoch 917/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 918/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 919/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 920/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 921/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 922/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 923/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 924/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 925/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 926/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00926: ReduceLROnPlateau reducing learning rate to 1.6155872106289034e-30.
Epoch 927/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 928/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 929/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 930/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 931/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 932/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 933/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 934/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 935/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 936/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00936: ReduceLROnPlateau reducing learning rate to 8.077936053144517e-31.
Epoch 937/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 938/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 939/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 940/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 941/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 942/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 943/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 944/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 945/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 946/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00946: ReduceLROnPlateau reducing learning rate to 4.0389680265722585e-31.
Epoch 947/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 948/2000
164/164 [==============================] - 0s 140us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 949/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 950/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 951/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 952/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 953/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 954/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 955/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 956/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00956: ReduceLROnPlateau reducing learning rate to 2.0194840132861292e-31.
Epoch 957/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 958/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 959/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 960/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 961/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 962/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 963/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 964/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 965/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 966/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00966: ReduceLROnPlateau reducing learning rate to 1.0097420066430646e-31.
Epoch 967/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 968/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 969/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 970/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 971/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 972/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 973/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 974/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 975/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 976/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00976: ReduceLROnPlateau reducing learning rate to 5.048710033215323e-32.
Epoch 977/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 978/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 979/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 980/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 981/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 982/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 983/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 984/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 985/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 986/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00986: ReduceLROnPlateau reducing learning rate to 2.5243550166076616e-32.
Epoch 987/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 988/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 989/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 990/2000
164/164 [==============================] - ETA: 0s - loss: 0.5950 - accuracy: 0.65 - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 991/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 992/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 993/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 994/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 995/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 996/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 00996: ReduceLROnPlateau reducing learning rate to 1.2621775083038308e-32.
Epoch 997/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 998/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 999/2000
164/164 [==============================] - 0s 140us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1000/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1001/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1002/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1003/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1004/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1005/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1006/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01006: ReduceLROnPlateau reducing learning rate to 6.310887541519154e-33.
Epoch 1007/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1008/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1009/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1010/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1011/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1012/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1013/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1014/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1015/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1016/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01016: ReduceLROnPlateau reducing learning rate to 3.155443770759577e-33.
Epoch 1017/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1018/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1019/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1020/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1021/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1022/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1023/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1024/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1025/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1026/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01026: ReduceLROnPlateau reducing learning rate to 1.5777218853797885e-33.
Epoch 1027/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1028/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1029/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1030/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1031/2000
164/164 [==============================] - 0s 146us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1032/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1033/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1034/2000
164/164 [==============================] - 0s 140us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1035/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1036/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01036: ReduceLROnPlateau reducing learning rate to 7.888609426898942e-34.
Epoch 1037/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1038/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1039/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1040/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1041/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1042/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1043/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1044/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1045/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1046/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01046: ReduceLROnPlateau reducing learning rate to 3.944304713449471e-34.
Epoch 1047/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1048/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1049/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1050/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1051/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1052/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1053/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1054/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1055/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1056/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01056: ReduceLROnPlateau reducing learning rate to 1.9721523567247356e-34.
Epoch 1057/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1058/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1059/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1060/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1061/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1062/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1063/2000
164/164 [==============================] - 0s 79us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1064/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1065/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1066/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01066: ReduceLROnPlateau reducing learning rate to 9.860761783623678e-35.
Epoch 1067/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1068/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1069/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1070/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1071/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1072/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1073/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1074/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1075/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1076/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01076: ReduceLROnPlateau reducing learning rate to 4.930380891811839e-35.
Epoch 1077/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1078/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1079/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1080/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1081/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1082/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1083/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1084/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1085/2000
164/164 [==============================] - 0s 92us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1086/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01086: ReduceLROnPlateau reducing learning rate to 2.4651904459059195e-35.
Epoch 1087/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1088/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1089/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1090/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1091/2000
164/164 [==============================] - 0s 140us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1092/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1093/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1094/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1095/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1096/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01096: ReduceLROnPlateau reducing learning rate to 1.2325952229529597e-35.
Epoch 1097/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1098/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1099/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1100/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1101/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1102/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1103/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1104/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1105/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1106/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01106: ReduceLROnPlateau reducing learning rate to 6.162976114764799e-36.
Epoch 1107/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1108/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1109/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1110/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1111/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1112/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1113/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1114/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1115/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1116/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01116: ReduceLROnPlateau reducing learning rate to 3.0814880573823994e-36.
Epoch 1117/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1118/2000
164/164 [==============================] - 0s 140us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1119/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1120/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1121/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1122/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1123/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1124/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1125/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1126/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01126: ReduceLROnPlateau reducing learning rate to 1.5407440286911997e-36.
Epoch 1127/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1128/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1129/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1130/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1131/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1132/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1133/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1134/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1135/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1136/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01136: ReduceLROnPlateau reducing learning rate to 7.703720143455998e-37.
Epoch 1137/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1138/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1139/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1140/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1141/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1142/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1143/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1144/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1145/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1146/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01146: ReduceLROnPlateau reducing learning rate to 3.851860071727999e-37.
Epoch 1147/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1148/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1149/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1150/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1151/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1152/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1153/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1154/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1155/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1156/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01156: ReduceLROnPlateau reducing learning rate to 1.9259300358639996e-37.
Epoch 1157/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1158/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1159/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1160/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1161/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1162/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1163/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1164/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1165/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1166/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01166: ReduceLROnPlateau reducing learning rate to 9.629650179319998e-38.
Epoch 1167/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1168/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1169/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1170/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1171/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1172/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1173/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1174/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1175/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1176/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01176: ReduceLROnPlateau reducing learning rate to 4.814825089659999e-38.
Epoch 1177/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1178/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1179/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1180/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1181/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1182/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1183/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1184/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1185/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1186/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01186: ReduceLROnPlateau reducing learning rate to 2.4074125448299995e-38.
Epoch 1187/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1188/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1189/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1190/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1191/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1192/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1193/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1194/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1195/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1196/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01196: ReduceLROnPlateau reducing learning rate to 1.2037062724149998e-38.
Epoch 1197/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1198/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1199/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1200/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1201/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1202/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1203/2000
164/164 [==============================] - ETA: 0s - loss: 0.6377 - accuracy: 0.62 - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1204/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1205/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1206/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01206: ReduceLROnPlateau reducing learning rate to 6.018531362074999e-39.
Epoch 1207/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1208/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1209/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1210/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1211/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1212/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1213/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1214/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1215/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1216/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01216: ReduceLROnPlateau reducing learning rate to 3.0092660313621155e-39.
Epoch 1217/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1218/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1219/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1220/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1221/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1222/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1223/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1224/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1225/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1226/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01226: ReduceLROnPlateau reducing learning rate to 1.5046330156810577e-39.
Epoch 1227/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1228/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1229/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1230/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1231/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1232/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1233/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1234/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1235/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1236/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01236: ReduceLROnPlateau reducing learning rate to 7.523165078405289e-40.
Epoch 1237/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1238/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1239/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1240/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1241/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1242/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1243/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1244/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1245/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1246/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01246: ReduceLROnPlateau reducing learning rate to 3.761582539202644e-40.
Epoch 1247/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1248/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1249/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1250/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1251/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1252/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1253/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1254/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1255/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1256/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01256: ReduceLROnPlateau reducing learning rate to 1.880794772847483e-40.
Epoch 1257/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1258/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1259/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1260/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1261/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1262/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1263/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1264/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1265/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1266/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01266: ReduceLROnPlateau reducing learning rate to 9.403973864237415e-41.
Epoch 1267/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1268/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1269/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1270/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1271/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1272/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1273/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1274/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1275/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1276/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01276: ReduceLROnPlateau reducing learning rate to 4.701986932118707e-41.
Epoch 1277/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1278/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1279/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1280/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1281/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1282/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1283/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1284/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1285/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1286/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01286: ReduceLROnPlateau reducing learning rate to 2.3509584335977456e-41.
Epoch 1287/2000
164/164 [==============================] - 0s 158us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1288/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1289/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1290/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1291/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1292/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1293/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1294/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1295/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1296/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01296: ReduceLROnPlateau reducing learning rate to 1.1754792167988728e-41.
Epoch 1297/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1298/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1299/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1300/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1301/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1302/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1303/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1304/2000
164/164 [==============================] - 0s 79us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1305/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1306/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01306: ReduceLROnPlateau reducing learning rate to 5.877045759378283e-42.
Epoch 1307/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1308/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1309/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1310/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1311/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1312/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1313/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1314/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1315/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1316/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01316: ReduceLROnPlateau reducing learning rate to 2.9385228796891414e-42.
Epoch 1317/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1318/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1319/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1320/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1321/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1322/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1323/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1324/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1325/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1326/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01326: ReduceLROnPlateau reducing learning rate to 1.4692614398445707e-42.
Epoch 1327/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1328/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1329/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1330/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1331/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1332/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1333/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1334/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1335/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1336/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01336: ReduceLROnPlateau reducing learning rate to 7.342803953062041e-43.
Epoch 1337/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1338/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1339/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1340/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1341/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1342/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1343/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1344/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1345/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1346/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01346: ReduceLROnPlateau reducing learning rate to 3.671401976531021e-43.
Epoch 1347/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1348/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1349/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1350/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1351/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1352/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1353/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1354/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1355/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1356/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01356: ReduceLROnPlateau reducing learning rate to 1.8357009882655104e-43.
Epoch 1357/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1358/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1359/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1360/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1361/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1362/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1363/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1364/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1365/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1366/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01366: ReduceLROnPlateau reducing learning rate to 9.178504941327552e-44.
Epoch 1367/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1368/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1369/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1370/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1371/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1372/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1373/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1374/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1375/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1376/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01376: ReduceLROnPlateau reducing learning rate to 4.624284932271896e-44.
Epoch 1377/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1378/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1379/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1380/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1381/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1382/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1383/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1384/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1385/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1386/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01386: ReduceLROnPlateau reducing learning rate to 2.312142466135948e-44.
Epoch 1387/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1388/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1389/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1390/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1391/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1392/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1393/2000
164/164 [==============================] - 0s 79us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1394/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1395/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1396/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01396: ReduceLROnPlateau reducing learning rate to 1.1210387714598537e-44.
Epoch 1397/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1398/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1399/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1400/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1401/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1402/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1403/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1404/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1405/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1406/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01406: ReduceLROnPlateau reducing learning rate to 5.605193857299268e-45.
Epoch 1407/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1408/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1409/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1410/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1411/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1412/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1413/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1414/2000
164/164 [==============================] - ETA: 0s - loss: 0.6674 - accuracy: 0.65 - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1415/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1416/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01416: ReduceLROnPlateau reducing learning rate to 2.802596928649634e-45.
Epoch 1417/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1418/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1419/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1420/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1421/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1422/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1423/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1424/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1425/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1426/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01426: ReduceLROnPlateau reducing learning rate to 1.401298464324817e-45.
Epoch 1427/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1428/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1429/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1430/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1431/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1432/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1433/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1434/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1435/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1436/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273

Epoch 01436: ReduceLROnPlateau reducing learning rate to 7.006492321624085e-46.
Epoch 1437/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1438/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1439/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1440/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1441/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1442/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1443/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1444/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1445/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1446/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1447/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1448/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1449/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1450/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1451/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1452/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1453/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1454/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1455/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1456/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1457/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1458/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1459/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1460/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1461/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1462/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1463/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1464/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1465/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1466/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1467/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1468/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1469/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1470/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1471/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1472/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1473/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1474/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1475/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1476/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1477/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1478/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1479/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1480/2000
164/164 [==============================] - 0s 79us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1481/2000
164/164 [==============================] - 0s 73us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1482/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1483/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1484/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1485/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1486/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1487/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1488/2000
164/164 [==============================] - ETA: 0s - loss: 0.5645 - accuracy: 0.68 - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1489/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1490/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1491/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1492/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1493/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1494/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1495/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1496/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1497/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1498/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1499/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1500/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1501/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1502/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1503/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1504/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1505/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1506/2000
164/164 [==============================] - 0s 79us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1507/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1508/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1509/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1510/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1511/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1512/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1513/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1514/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1515/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1516/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1517/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1518/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1519/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1520/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1521/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1522/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1523/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1524/2000
164/164 [==============================] - ETA: 0s - loss: 0.5285 - accuracy: 0.75 - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1525/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1526/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1527/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1528/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1529/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1530/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1531/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1532/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1533/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1534/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1535/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1536/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1537/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1538/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1539/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1540/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1541/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1542/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1543/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1544/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1545/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1546/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1547/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1548/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1549/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1550/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1551/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1552/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1553/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1554/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1555/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1556/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1557/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1558/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1559/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1560/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1561/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1562/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1563/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1564/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1565/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1566/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1567/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1568/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1569/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1570/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1571/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1572/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1573/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1574/2000
164/164 [==============================] - 0s 158us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1575/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1576/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1577/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1578/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1579/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1580/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1581/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1582/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1583/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1584/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1585/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1586/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1587/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1588/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1589/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1590/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1591/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1592/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1593/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1594/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1595/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1596/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1597/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1598/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1599/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1600/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1601/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1602/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1603/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1604/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1605/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1606/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1607/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1608/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1609/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1610/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1611/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1612/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1613/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1614/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1615/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1616/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1617/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1618/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1619/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1620/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1621/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1622/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1623/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1624/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1625/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1626/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1627/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1628/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1629/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1630/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1631/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1632/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1633/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1634/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1635/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1636/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1637/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1638/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1639/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1640/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1641/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1642/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1643/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1644/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1645/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1646/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1647/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1648/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1649/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1650/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1651/2000
164/164 [==============================] - 0s 79us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1652/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1653/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1654/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1655/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1656/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1657/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1658/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1659/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1660/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1661/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1662/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1663/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1664/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1665/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1666/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1667/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1668/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1669/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1670/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1671/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1672/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1673/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1674/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1675/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1676/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1677/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1678/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1679/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1680/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1681/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1682/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1683/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1684/2000
164/164 [==============================] - 0s 140us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1685/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1686/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1687/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1688/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1689/2000
164/164 [==============================] - 0s 140us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1690/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1691/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1692/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1693/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1694/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1695/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1696/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1697/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1698/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1699/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1700/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1701/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1702/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1703/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1704/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1705/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1706/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1707/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1708/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1709/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1710/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1711/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1712/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1713/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1714/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1715/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1716/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1717/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1718/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1719/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1720/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1721/2000
164/164 [==============================] - 0s 158us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1722/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1723/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1724/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1725/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1726/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1727/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1728/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1729/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1730/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1731/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1732/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1733/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1734/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1735/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1736/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1737/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1738/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1739/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1740/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1741/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1742/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1743/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1744/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1745/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1746/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1747/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1748/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1749/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1750/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1751/2000
164/164 [==============================] - ETA: 0s - loss: 0.5969 - accuracy: 0.65 - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1752/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1753/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1754/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1755/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1756/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1757/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1758/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1759/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1760/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1761/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1762/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1763/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1764/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1765/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1766/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1767/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1768/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1769/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1770/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1771/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1772/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1773/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1774/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1775/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1776/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1777/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1778/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1779/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1780/2000
164/164 [==============================] - 0s 146us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1781/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1782/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1783/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1784/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1785/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1786/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1787/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1788/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1789/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1790/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1791/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1792/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1793/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1794/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1795/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1796/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1797/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1798/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1799/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1800/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1801/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1802/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1803/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1804/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1805/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1806/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1807/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1808/2000
164/164 [==============================] - 0s 146us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1809/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1810/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1811/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1812/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1813/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1814/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1815/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1816/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1817/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1818/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1819/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1820/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1821/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1822/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1823/2000
164/164 [==============================] - 0s 128us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1824/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1825/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1826/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1827/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1828/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1829/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1830/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1831/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1832/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1833/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1834/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1835/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1836/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1837/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1838/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1839/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1840/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1841/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1842/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1843/2000
164/164 [==============================] - ETA: 0s - loss: 0.6190 - accuracy: 0.65 - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1844/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1845/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1846/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1847/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1848/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1849/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1850/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1851/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1852/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1853/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1854/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1855/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1856/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1857/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1858/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1859/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1860/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1861/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1862/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1863/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1864/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1865/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1866/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1867/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1868/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1869/2000
164/164 [==============================] - 0s 140us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1870/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1871/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1872/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1873/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1874/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1875/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1876/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1877/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1878/2000
164/164 [==============================] - ETA: 0s - loss: 0.6782 - accuracy: 0.53 - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1879/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1880/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1881/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1882/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1883/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1884/2000
164/164 [==============================] - ETA: 0s - loss: 0.5652 - accuracy: 0.71 - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1885/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1886/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1887/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1888/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1889/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1890/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1891/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1892/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1893/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1894/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1895/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1896/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1897/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1898/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1899/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1900/2000
164/164 [==============================] - 0s 122us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1901/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1902/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1903/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1904/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1905/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1906/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1907/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1908/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1909/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1910/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1911/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1912/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1913/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1914/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1915/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1916/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1917/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1918/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1919/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1920/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1921/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1922/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1923/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1924/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1925/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1926/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1927/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1928/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1929/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1930/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1931/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1932/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1933/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1934/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1935/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1936/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1937/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1938/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1939/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1940/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1941/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1942/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1943/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1944/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1945/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1946/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1947/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1948/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1949/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1950/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1951/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1952/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1953/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1954/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1955/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1956/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1957/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1958/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1959/2000
164/164 [==============================] - 0s 116us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1960/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1961/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1962/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1963/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1964/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1965/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1966/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1967/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1968/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1969/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1970/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1971/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1972/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1973/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1974/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1975/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1976/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1977/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1978/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1979/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1980/2000
164/164 [==============================] - 0s 134us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1981/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1982/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1983/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1984/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1985/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1986/2000
164/164 [==============================] - 0s 110us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1987/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1988/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1989/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1990/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1991/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1992/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1993/2000
164/164 [==============================] - 0s 98us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1994/2000
164/164 [==============================] - 0s 85us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1995/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1996/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1997/2000
164/164 [==============================] - 0s 91us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1998/2000
164/164 [==============================] - 0s 97us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 1999/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
Epoch 2000/2000
164/164 [==============================] - 0s 104us/step - loss: 0.6195 - accuracy: 0.6524 - val_loss: 0.7339 - val_accuracy: 0.5273
In [38]:
# Plot the Keras training history: accuracy and loss curves for the
# training and validation sets.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
range(0, 2000)
In [39]:
# Score the trained network on the held-out test split.
evaluation = model.evaluate(X_test, y_test)
test_loss, test_acc = evaluation
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
55/55 [==============================] - 0s 91us/step
test loss: 0.7338643464175137, test accuracy: 0.5272727012634277
In [40]:
# Probability scores on the test set feed a threshold-free ranking metric.
y_pred = model.predict(X_test)
auc = roc_auc_score(y_test, y_pred)
print("AUC ROC: ", auc)
AUC ROC:  0.548941798941799
In [41]:
# Binarize the predicted probabilities at 0.5, then report agreement metrics.
y_pred = [int(prob >= 0.5) for prob in y_pred]
print("Kappa: ", cohen_kappa_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
Kappa:  0.0542328042328043
[[15 13]
 [13 14]]

KMeans

In [518]:
X
Out[518]:
chromagramfiles_1 chromagramfiles_2 chromagramfiles_3 chromagramfiles_4 chromagramfiles_5 chromagramfiles_6 chromagramfiles_7 chromagramfiles_8 chromagramfiles_9 chromagramfiles_10 chromagramfiles_11 chromagramfiles_12
0 1.752761 -1.114598 -0.348132 2.966304 -0.925235 0.552064 -0.710296 0.412009 -0.784684 -1.667162 -0.834151 -1.566379
1 -1.387006 -0.333039 0.041297 -0.917052 0.656635 -1.022407 -0.805166 -0.905135 -0.708805 1.299875 -0.948816 -1.413196
2 -0.628834 2.234144 0.613536 -0.978644 0.836157 -0.735689 -0.059767 -1.571350 1.278264 -1.103616 -1.153426 0.685062
3 0.081693 1.765530 -0.365668 0.759057 -1.136519 -0.071939 -0.412587 -1.310708 1.465231 -1.266573 0.040076 -0.308065
4 0.056206 1.646501 0.508800 0.525847 0.506842 -0.390517 -0.241209 -0.409725 1.465231 -0.148455 -0.354779 0.188297
5 -0.475284 0.673200 -1.077774 0.360339 -0.032017 0.910768 1.405699 -0.748908 1.465231 -0.933838 -0.225835 -0.922170
6 0.163433 -0.011304 -1.057752 1.128932 0.026597 1.324397 -0.060004 -0.869144 1.465231 -0.902156 0.598522 -1.044263
7 0.110500 0.178211 -1.394026 0.897710 -1.243991 -0.384906 -0.976009 -1.391712 1.465231 -1.735560 -0.662454 -1.567335
8 0.595793 -0.898440 1.924099 0.051119 0.528166 0.377574 -0.881206 1.992593 -0.169332 0.289379 -1.264549 -0.986278
9 -0.017542 -1.386211 0.574605 -1.268664 -0.911663 -1.241512 -1.036865 1.992592 -1.595580 0.436331 -1.528923 -1.224807
10 0.796579 -1.216664 -0.112327 -0.690935 1.077368 0.874900 -0.528379 1.992593 -0.347105 1.295264 0.069115 -0.797501
11 1.761253 -0.949116 -0.297777 -0.913826 -0.875567 -0.968315 -0.407542 -0.016419 -0.556271 0.391283 0.743737 0.460341
12 1.055147 -0.394702 1.258031 -0.517345 -0.021328 -0.557321 0.669125 1.243976 0.262937 0.337908 0.485227 2.007205
13 1.761253 -0.435392 0.592085 -0.692391 0.535758 -0.708164 -0.382176 0.125232 -0.083947 0.888176 0.994199 1.087612
14 -0.137413 -0.980041 -1.297302 -0.880795 1.395884 -0.901503 -0.756382 -0.304071 -1.277311 1.299875 -1.162641 -1.274290
15 0.017749 -0.936126 -1.136240 -0.862914 0.782285 -0.230220 -0.734923 -0.295573 -1.221528 1.299875 -1.058979 -1.237947
16 -0.382429 -1.386211 -1.437693 -1.268664 0.481905 -1.219324 -1.036865 0.284908 -1.749408 1.299875 -1.532659 -1.596787
17 -1.227998 1.294887 2.200026 -0.318659 2.364886 -1.078770 -0.029818 -1.427905 -1.667310 0.632080 -1.532496 0.393871
18 -1.076569 -0.776099 -0.576736 -0.929915 0.188222 -1.067980 0.156056 -0.671783 -0.806376 1.299875 -0.994450 -0.887100
19 -1.180746 0.385053 2.200026 -0.005979 1.302093 -0.928084 -0.007355 -0.793418 -1.747571 -0.917240 -1.528514 -0.042363
20 1.192498 -0.224351 -0.206238 0.217971 0.304072 0.949289 -0.514273 1.091683 1.465231 -0.718685 -0.298094 -0.264379
21 1.213472 0.048184 -0.600330 0.007879 0.177356 1.215438 -0.394095 0.833193 1.465231 -0.381808 -0.415137 -0.479222
22 -0.004572 -0.052026 2.200026 -0.008377 1.967963 -0.419263 -0.302302 0.046448 -0.343181 1.114245 -0.245566 0.544195
23 -0.229893 0.630662 0.484895 0.048344 -0.498767 0.662733 -0.509073 0.013777 1.465231 0.438954 -0.681106 -0.745425
24 0.217943 -0.177066 0.659322 -0.134414 1.650468 -0.583176 -0.559211 -0.406584 -0.598204 1.299875 -0.406852 0.234437
25 -0.333597 0.972324 -0.721724 -0.554449 -0.493410 0.518707 0.321268 -1.070955 -0.564510 -0.833869 1.695138 -0.783758
26 -0.602430 -0.028610 -0.826715 -0.377974 -0.696055 -0.182813 -0.120202 -0.886178 -0.584506 -0.673011 1.695138 -0.599349
27 -0.251812 -0.171571 -0.358821 -0.244636 -0.013843 0.945000 1.038022 -0.727984 0.773167 -0.048067 1.695138 0.563056
28 -0.368678 2.234146 -0.330756 -0.138877 -0.559553 -0.851160 -0.744847 -0.884446 -0.828451 -1.011890 -0.568588 0.130393
29 -0.578621 2.234146 -0.562376 -0.105376 -0.523903 -0.825786 -0.690314 -0.809436 -0.870132 -1.142574 -0.536734 0.859716
... ... ... ... ... ... ... ... ... ... ... ... ...
189 -0.186741 -1.165084 -0.921181 1.397816 -0.045251 1.304725 -0.153535 1.038843 1.465231 0.175798 1.209572 -0.558994
190 -0.163966 0.884601 1.657605 -0.260803 1.379747 -0.732537 -0.007962 1.276498 -0.419568 1.299875 -0.588765 1.807669
191 -0.254097 -0.617571 0.535718 -0.297186 1.865594 -0.310035 0.916200 1.349280 -0.168599 1.299875 -0.479982 1.556484
192 -1.359431 0.228998 0.259367 -1.169606 0.516753 -1.080823 1.794240 -0.257346 -1.633246 0.226711 -1.071882 2.007205
193 1.622617 2.234146 -1.127605 -0.273622 -1.141311 0.831169 -0.681956 -1.023930 0.097381 -1.505036 -0.432936 -0.703132
194 -0.467818 -0.314482 0.014154 0.025208 -0.807816 -0.093075 3.055878 0.013994 -0.514838 -0.698969 0.430445 -0.302544
195 -1.423661 -0.599038 -0.707969 -1.084898 -1.206212 -0.474886 1.273960 -1.605019 -1.163780 -1.777082 1.695138 -1.000177
196 1.761253 -0.325612 0.357954 -0.709817 1.043699 -0.927401 -0.653399 -0.107407 -0.553233 -0.078311 -0.687092 -0.080809
197 1.761253 1.028574 1.374947 0.116683 1.136600 -0.187955 -0.195137 0.341797 -0.856237 0.366485 -0.658035 0.645642
198 1.761253 0.050043 0.687757 -0.434894 0.705291 -0.680095 -0.549325 0.816661 -0.567882 0.584770 -0.302600 -0.144573
199 -0.919189 -0.200271 -1.032880 -0.020720 -1.069924 -0.649964 -0.843737 -0.600356 1.465231 -0.271885 0.807204 -0.901281
200 -0.988568 -0.189392 -1.028379 0.102199 -1.085401 -0.667213 -0.580786 -0.498427 1.465231 -0.287173 0.737684 -0.938325
201 -0.779270 0.427427 -0.948100 -0.165441 -1.033638 0.272258 -0.022274 -0.198353 1.465231 -0.072695 1.561935 -0.848980
202 1.116127 -1.139042 0.168460 -1.122851 -0.854043 1.067951 -0.735589 0.656822 0.343006 1.299875 0.776152 -0.247153
203 0.762440 -1.190866 -0.050517 -1.116856 -1.095215 0.632777 -0.797158 1.167398 1.149430 1.299875 1.392402 0.047424
204 0.058411 -1.128496 -1.328850 -1.229911 -1.074617 -0.085788 -0.812628 0.326031 0.625580 1.299875 1.290873 -0.077297
205 -0.720270 -0.697672 -0.720394 1.813110 -0.859595 -0.861783 -0.908744 1.216314 1.411592 -0.405169 1.695138 -0.439681
206 -0.364461 -0.010160 -1.095500 0.914425 -1.040229 0.885108 -0.998962 0.772555 1.465231 -0.838613 0.940082 -0.955178
207 -0.272236 -0.196860 -0.727982 0.141418 -0.904008 -0.065613 -0.944991 0.795648 0.611803 0.903563 1.695137 -0.158991
208 1.761253 -0.082523 -0.122308 0.265346 -0.485445 0.500219 0.126422 -0.354469 0.139245 -0.560750 1.149004 0.354466
209 1.573516 -0.161473 -0.359909 1.241760 -0.664508 1.002463 -0.076931 0.519366 1.465231 -0.646843 0.532185 -0.215564
210 1.761253 0.417875 -0.918851 -0.929813 -0.982357 -0.771036 -0.949451 -0.679224 -0.652751 -1.604058 -0.786724 -0.848956
211 1.761253 0.145550 1.710575 1.914520 1.233461 2.474950 0.953725 1.263177 0.830563 0.011243 0.602765 -0.024981
212 0.470346 -0.334996 2.200026 1.858653 0.847856 1.364055 0.061293 1.366431 -0.301856 -0.501569 -0.235203 -0.695720
213 -0.576457 -0.914445 1.070087 0.337357 0.306857 0.394672 -0.372356 -0.450091 -0.240434 0.165141 1.695138 0.345150
214 1.761253 -0.591066 -0.690824 2.065965 -0.721336 -0.340791 -0.483151 0.855908 0.529996 -1.116013 0.527710 -0.050391
215 1.490806 -1.368871 -1.151960 2.846487 -0.924825 0.052478 -0.970103 1.992593 0.913611 -0.452243 0.613814 -1.529552
216 0.191801 -1.348512 -1.315236 -0.455163 -1.244101 -1.240530 -1.036865 -0.531344 1.465231 -1.809822 -1.350349 -1.513618
217 -0.002098 2.039653 -0.752917 0.971355 -0.795869 0.431147 -0.753214 0.043687 1.465231 -1.105865 -0.938582 -0.984328
218 -0.098688 -0.923087 -0.917548 0.312310 -0.183969 0.248120 -0.545773 0.584070 0.733937 -0.697562 1.695138 -0.497086

219 rows × 12 columns

In [519]:
# Elbow method: within-cluster sum of squares (inertia) for k = 1..14.
# KMeans.fit returns the fitted estimator, so the whole sweep chains
# into one comprehension.
WSSs = [KMeans(n_clusters=k, random_state=0).fit(X).inertia_ for k in range(1, 15)]
WSSs
Out[519]:
[2628.0,
 2183.3505259693475,
 1946.7709026205612,
 1764.2381744586387,
 1667.52081260375,
 1588.695626080069,
 1521.6546082793252,
 1465.3583612489235,
 1381.9950671234758,
 1338.0035802619554,
 1275.054416118868,
 1244.4334212962274,
 1201.2393010186188,
 1166.7934845697623]
In [520]:
# Elbow plot: WSS vs. number of clusters (look for the "knee").
fig, ax = plt.subplots(figsize=(12, 12))
ax.plot(range(1, 15), WSSs)
Out[520]:
[<matplotlib.lines.Line2D at 0x1b82f66a198>]

K=2

In [521]:
# Fit the final K-Means model with k = 2, chosen from the elbow plot above.
# fit() returns the estimator itself, so the repr still displays as output.
kmeans_ch = KMeans(n_clusters=2, random_state=0, n_init=10).fit(X)
kmeans_ch
Out[521]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=2, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [522]:
# Cluster label (0 or 1) assigned to each row of X during fitting.
kmeans_ch.labels_
Out[522]:
array([0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
       1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0,
       0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
       1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,
       1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1,
       1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0,
       0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
       0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1,
       1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
In [523]:
# Cluster assignment per row of X; on the data the model was fitted on,
# this reproduces kmeans_ch.labels_ shown above.
clusters_ch = kmeans_ch.predict(X)
clusters_ch
Out[523]:
array([0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
       1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0,
       0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
       1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,
       1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1,
       1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0,
       0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
       0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1,
       1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
In [524]:
# Attach the predicted cluster and the true 'chosen' label to X so the two
# can be cross-tabulated in the stacked bar chart below.
# NOTE(review): this mutates the feature frame X in place — any later model
# fit on X would see 'Cluster'/'chosen' as features; a separate copy would
# be safer.
X.loc[:,'Cluster'] = clusters_ch
X.loc[:,'chosen'] = list(y)  # list() drops y's index so values align by position
In [525]:
# Feature frame with the two appended columns (now 219 rows x 14 columns).
X
Out[525]:
chromagramfiles_1 chromagramfiles_2 chromagramfiles_3 chromagramfiles_4 chromagramfiles_5 chromagramfiles_6 chromagramfiles_7 chromagramfiles_8 chromagramfiles_9 chromagramfiles_10 chromagramfiles_11 chromagramfiles_12 Cluster chosen
0 1.752761 -1.114598 -0.348132 2.966304 -0.925235 0.552064 -0.710296 0.412009 -0.784684 -1.667162 -0.834151 -1.566379 0 0
1 -1.387006 -0.333039 0.041297 -0.917052 0.656635 -1.022407 -0.805166 -0.905135 -0.708805 1.299875 -0.948816 -1.413196 1 0
2 -0.628834 2.234144 0.613536 -0.978644 0.836157 -0.735689 -0.059767 -1.571350 1.278264 -1.103616 -1.153426 0.685062 1 0
3 0.081693 1.765530 -0.365668 0.759057 -1.136519 -0.071939 -0.412587 -1.310708 1.465231 -1.266573 0.040076 -0.308065 0 0
4 0.056206 1.646501 0.508800 0.525847 0.506842 -0.390517 -0.241209 -0.409725 1.465231 -0.148455 -0.354779 0.188297 0 0
5 -0.475284 0.673200 -1.077774 0.360339 -0.032017 0.910768 1.405699 -0.748908 1.465231 -0.933838 -0.225835 -0.922170 0 0
6 0.163433 -0.011304 -1.057752 1.128932 0.026597 1.324397 -0.060004 -0.869144 1.465231 -0.902156 0.598522 -1.044263 0 0
7 0.110500 0.178211 -1.394026 0.897710 -1.243991 -0.384906 -0.976009 -1.391712 1.465231 -1.735560 -0.662454 -1.567335 0 0
8 0.595793 -0.898440 1.924099 0.051119 0.528166 0.377574 -0.881206 1.992593 -0.169332 0.289379 -1.264549 -0.986278 1 0
9 -0.017542 -1.386211 0.574605 -1.268664 -0.911663 -1.241512 -1.036865 1.992592 -1.595580 0.436331 -1.528923 -1.224807 1 0
10 0.796579 -1.216664 -0.112327 -0.690935 1.077368 0.874900 -0.528379 1.992593 -0.347105 1.295264 0.069115 -0.797501 1 0
11 1.761253 -0.949116 -0.297777 -0.913826 -0.875567 -0.968315 -0.407542 -0.016419 -0.556271 0.391283 0.743737 0.460341 1 0
12 1.055147 -0.394702 1.258031 -0.517345 -0.021328 -0.557321 0.669125 1.243976 0.262937 0.337908 0.485227 2.007205 1 0
13 1.761253 -0.435392 0.592085 -0.692391 0.535758 -0.708164 -0.382176 0.125232 -0.083947 0.888176 0.994199 1.087612 1 0
14 -0.137413 -0.980041 -1.297302 -0.880795 1.395884 -0.901503 -0.756382 -0.304071 -1.277311 1.299875 -1.162641 -1.274290 1 0
15 0.017749 -0.936126 -1.136240 -0.862914 0.782285 -0.230220 -0.734923 -0.295573 -1.221528 1.299875 -1.058979 -1.237947 1 0
16 -0.382429 -1.386211 -1.437693 -1.268664 0.481905 -1.219324 -1.036865 0.284908 -1.749408 1.299875 -1.532659 -1.596787 1 0
17 -1.227998 1.294887 2.200026 -0.318659 2.364886 -1.078770 -0.029818 -1.427905 -1.667310 0.632080 -1.532496 0.393871 1 0
18 -1.076569 -0.776099 -0.576736 -0.929915 0.188222 -1.067980 0.156056 -0.671783 -0.806376 1.299875 -0.994450 -0.887100 1 0
19 -1.180746 0.385053 2.200026 -0.005979 1.302093 -0.928084 -0.007355 -0.793418 -1.747571 -0.917240 -1.528514 -0.042363 1 0
20 1.192498 -0.224351 -0.206238 0.217971 0.304072 0.949289 -0.514273 1.091683 1.465231 -0.718685 -0.298094 -0.264379 0 0
21 1.213472 0.048184 -0.600330 0.007879 0.177356 1.215438 -0.394095 0.833193 1.465231 -0.381808 -0.415137 -0.479222 0 0
22 -0.004572 -0.052026 2.200026 -0.008377 1.967963 -0.419263 -0.302302 0.046448 -0.343181 1.114245 -0.245566 0.544195 1 0
23 -0.229893 0.630662 0.484895 0.048344 -0.498767 0.662733 -0.509073 0.013777 1.465231 0.438954 -0.681106 -0.745425 0 0
24 0.217943 -0.177066 0.659322 -0.134414 1.650468 -0.583176 -0.559211 -0.406584 -0.598204 1.299875 -0.406852 0.234437 1 0
25 -0.333597 0.972324 -0.721724 -0.554449 -0.493410 0.518707 0.321268 -1.070955 -0.564510 -0.833869 1.695138 -0.783758 0 0
26 -0.602430 -0.028610 -0.826715 -0.377974 -0.696055 -0.182813 -0.120202 -0.886178 -0.584506 -0.673011 1.695138 -0.599349 0 0
27 -0.251812 -0.171571 -0.358821 -0.244636 -0.013843 0.945000 1.038022 -0.727984 0.773167 -0.048067 1.695138 0.563056 0 0
28 -0.368678 2.234146 -0.330756 -0.138877 -0.559553 -0.851160 -0.744847 -0.884446 -0.828451 -1.011890 -0.568588 0.130393 0 0
29 -0.578621 2.234146 -0.562376 -0.105376 -0.523903 -0.825786 -0.690314 -0.809436 -0.870132 -1.142574 -0.536734 0.859716 0 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
189 -0.186741 -1.165084 -0.921181 1.397816 -0.045251 1.304725 -0.153535 1.038843 1.465231 0.175798 1.209572 -0.558994 0 1
190 -0.163966 0.884601 1.657605 -0.260803 1.379747 -0.732537 -0.007962 1.276498 -0.419568 1.299875 -0.588765 1.807669 1 1
191 -0.254097 -0.617571 0.535718 -0.297186 1.865594 -0.310035 0.916200 1.349280 -0.168599 1.299875 -0.479982 1.556484 1 1
192 -1.359431 0.228998 0.259367 -1.169606 0.516753 -1.080823 1.794240 -0.257346 -1.633246 0.226711 -1.071882 2.007205 1 1
193 1.622617 2.234146 -1.127605 -0.273622 -1.141311 0.831169 -0.681956 -1.023930 0.097381 -1.505036 -0.432936 -0.703132 0 1
194 -0.467818 -0.314482 0.014154 0.025208 -0.807816 -0.093075 3.055878 0.013994 -0.514838 -0.698969 0.430445 -0.302544 0 1
195 -1.423661 -0.599038 -0.707969 -1.084898 -1.206212 -0.474886 1.273960 -1.605019 -1.163780 -1.777082 1.695138 -1.000177 0 1
196 1.761253 -0.325612 0.357954 -0.709817 1.043699 -0.927401 -0.653399 -0.107407 -0.553233 -0.078311 -0.687092 -0.080809 1 1
197 1.761253 1.028574 1.374947 0.116683 1.136600 -0.187955 -0.195137 0.341797 -0.856237 0.366485 -0.658035 0.645642 1 1
198 1.761253 0.050043 0.687757 -0.434894 0.705291 -0.680095 -0.549325 0.816661 -0.567882 0.584770 -0.302600 -0.144573 1 1
199 -0.919189 -0.200271 -1.032880 -0.020720 -1.069924 -0.649964 -0.843737 -0.600356 1.465231 -0.271885 0.807204 -0.901281 0 1
200 -0.988568 -0.189392 -1.028379 0.102199 -1.085401 -0.667213 -0.580786 -0.498427 1.465231 -0.287173 0.737684 -0.938325 0 1
201 -0.779270 0.427427 -0.948100 -0.165441 -1.033638 0.272258 -0.022274 -0.198353 1.465231 -0.072695 1.561935 -0.848980 0 1
202 1.116127 -1.139042 0.168460 -1.122851 -0.854043 1.067951 -0.735589 0.656822 0.343006 1.299875 0.776152 -0.247153 0 1
203 0.762440 -1.190866 -0.050517 -1.116856 -1.095215 0.632777 -0.797158 1.167398 1.149430 1.299875 1.392402 0.047424 0 1
204 0.058411 -1.128496 -1.328850 -1.229911 -1.074617 -0.085788 -0.812628 0.326031 0.625580 1.299875 1.290873 -0.077297 0 1
205 -0.720270 -0.697672 -0.720394 1.813110 -0.859595 -0.861783 -0.908744 1.216314 1.411592 -0.405169 1.695138 -0.439681 0 1
206 -0.364461 -0.010160 -1.095500 0.914425 -1.040229 0.885108 -0.998962 0.772555 1.465231 -0.838613 0.940082 -0.955178 0 1
207 -0.272236 -0.196860 -0.727982 0.141418 -0.904008 -0.065613 -0.944991 0.795648 0.611803 0.903563 1.695137 -0.158991 0 1
208 1.761253 -0.082523 -0.122308 0.265346 -0.485445 0.500219 0.126422 -0.354469 0.139245 -0.560750 1.149004 0.354466 0 1
209 1.573516 -0.161473 -0.359909 1.241760 -0.664508 1.002463 -0.076931 0.519366 1.465231 -0.646843 0.532185 -0.215564 0 1
210 1.761253 0.417875 -0.918851 -0.929813 -0.982357 -0.771036 -0.949451 -0.679224 -0.652751 -1.604058 -0.786724 -0.848956 0 1
211 1.761253 0.145550 1.710575 1.914520 1.233461 2.474950 0.953725 1.263177 0.830563 0.011243 0.602765 -0.024981 0 1
212 0.470346 -0.334996 2.200026 1.858653 0.847856 1.364055 0.061293 1.366431 -0.301856 -0.501569 -0.235203 -0.695720 0 1
213 -0.576457 -0.914445 1.070087 0.337357 0.306857 0.394672 -0.372356 -0.450091 -0.240434 0.165141 1.695138 0.345150 0 1
214 1.761253 -0.591066 -0.690824 2.065965 -0.721336 -0.340791 -0.483151 0.855908 0.529996 -1.116013 0.527710 -0.050391 0 1
215 1.490806 -1.368871 -1.151960 2.846487 -0.924825 0.052478 -0.970103 1.992593 0.913611 -0.452243 0.613814 -1.529552 0 1
216 0.191801 -1.348512 -1.315236 -0.455163 -1.244101 -1.240530 -1.036865 -0.531344 1.465231 -1.809822 -1.350349 -1.513618 0 1
217 -0.002098 2.039653 -0.752917 0.971355 -0.795869 0.431147 -0.753214 0.043687 1.465231 -1.105865 -0.938582 -0.984328 0 1
218 -0.098688 -0.923087 -0.917548 0.312310 -0.183969 0.248120 -0.545773 0.584070 0.733937 -0.697562 1.695138 -0.497086 0 1

219 rows × 14 columns

In [526]:
# Count rows per (chosen, cluster) pair and draw one stacked bar per cluster
# to compare cluster membership against the true label.
cluster_counts = X.groupby(['chosen', 'Cluster']).size().reset_index()
pivot_df = cluster_counts.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:, [0, 1]].plot.bar(stacked=True, figsize=(10, 7))
Out[526]:
<matplotlib.axes._subplots.AxesSubplot at 0x1b829dddc50>
In [527]:
# Render the current company's name as a markdown section header.
from IPython.display import display, Markdown, Latex
display(Markdown('## '+companies[5]))

Urban Place

ANN

In [344]:
# Standardized chromagram features for company index 5 ("Urban Place",
# per the header rendered above).
X = df_n_ps_std_ch[5]
In [345]:
# Binary target: whether the song was chosen (0/1).
y = df_n_ps[5]['chosen']
In [346]:
# Hold out a test set (default 25% split).
# A fixed random_state makes the split — and every downstream metric —
# reproducible across Restart & Run All; the original call had no seed.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1234)
In [347]:
# Sanity check on the training split: (samples, features).
X_train.shape
Out[347]:
(162, 12)
In [348]:
# Base MLP estimator for the grid search; the architecture given here is a
# placeholder — `hidden_layer_sizes` is overridden by the parameter grid below.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [349]:
# Hyperparameter search space for the MLP grid search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [
    (10,), (20,), (30,),
    (10, 10), (20, 20), (30, 30), (20, 10),
    (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10),
]
# 0.001 .. 0.010 in 0.001 steps, plus one coarser value.
learning_rate_init_vec = [round(k / 1000, 3) for k in range(1, 11)] + [0.02]
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [350]:
# Configure a 5-fold grid search over the MLP hyperparameters, selecting the
# refit model by accuracy while also tracking Cohen's kappa.
import time
start = time.time() # Current time in seconds since Jan 1, 1970 (reference point) — used later to report total search time

np.random.seed(1234)
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Two scorers: kappa for reporting, accuracy as the refit criterion.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): `iid` was deprecated in scikit-learn 0.22 and removed in
# 0.24 — drop this argument when upgrading.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [535]:
# Run the grid search (previously measured at ~25 minutes) and report the
# best configuration with its CV accuracy and kappa.
grid.fit(X_train, y_train)

print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # Time right after the model search finished
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
Los parámetros del mejor modelo fueron {'activation': 'relu', 'hidden_layer_sizes': (10, 10, 10), 'learning_rate_init': 0.006, 'max_iter': 200}, que permiten obtener un Accuracy de 79.63% y un Kappa del 53.46
Tiempo total: 24.75 minutos
In [351]:
# The grid search takes ~25 minutes, so re-use its previously found optimum
# instead of re-fitting on every run.
# NOTE(review): this overwrites the attribute on the GridSearchCV object;
# later cells read grid.best_params_, so the name is kept for compatibility.
grid.best_params_ = {'activation': 'relu', 'hidden_layer_sizes': (10, 10, 10), 'learning_rate_init': 0.006, 'max_iter': 200}

n0 = X_train.shape[1]  # input width = number of features

# Layer widths: the hidden sizes from the best params, plus one output unit.
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)

lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [352]:
# Keras functional-API input layer sized to the number of features.
input_tensor = Input(shape = (n0,))
In [353]:
# Chain the hidden Dense layers: each layer consumes the previous output.
# ns[:-1] holds the hidden widths; ns[-1] (= 1) is the sigmoid output unit.
hidden_outputs = [input_tensor]
for width in ns[:-1]:
    hidden_outputs.append(
        Dense(width, activation = grid.best_params_['activation'])(hidden_outputs[-1])
    )

classification_output = Dense(ns[-1], activation = 'sigmoid')(hidden_outputs[-1])
In [354]:
# Wrap the layer graph in a Model and snapshot the freshly initialized
# weights so training can later be restarted from the same starting point.
model = Model(inputs=[input_tensor], outputs=[classification_output])
weights = model.get_weights()
In [355]:
# Architecture check: layer shapes and parameter counts.
model.summary()
Model: "model_19"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_21 (InputLayer)        (None, 12)                0         
_________________________________________________________________
dense_55 (Dense)             (None, 10)                130       
_________________________________________________________________
dense_56 (Dense)             (None, 10)                110       
_________________________________________________________________
dense_57 (Dense)             (None, 10)                110       
_________________________________________________________________
dense_58 (Dense)             (None, 1)                 11        
=================================================================
Total params: 361
Trainable params: 361
Non-trainable params: 0
_________________________________________________________________
In [356]:
# Restore the saved initial weights so this fit always starts from the same
# point, then compile and train with the grid-search optimum.
model.set_weights(weights)
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])

# Halve the learning rate whenever validation accuracy fails to improve by
# at least 0.01 for 10 consecutive epochs.
lr_schedule = keras.callbacks.ReduceLROnPlateau(
    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
)

history = model.fit(
    X_train,
    y_train,
    epochs=epochs,
    validation_data=(X_test, y_test),
    callbacks=[lr_schedule],
)
Train on 162 samples, validate on 54 samples
Epoch 1/200
162/162 [==============================] - 0s 2ms/step - loss: 0.7995 - accuracy: 0.3272 - val_loss: 0.7472 - val_accuracy: 0.3704
Epoch 2/200
162/162 [==============================] - 0s 86us/step - loss: 0.7034 - accuracy: 0.4815 - val_loss: 0.6900 - val_accuracy: 0.5741
Epoch 3/200
162/162 [==============================] - 0s 93us/step - loss: 0.6763 - accuracy: 0.6728 - val_loss: 0.6687 - val_accuracy: 0.6296
Epoch 4/200
162/162 [==============================] - 0s 86us/step - loss: 0.6577 - accuracy: 0.7099 - val_loss: 0.6443 - val_accuracy: 0.6296
Epoch 5/200
162/162 [==============================] - 0s 86us/step - loss: 0.6404 - accuracy: 0.6914 - val_loss: 0.6290 - val_accuracy: 0.6667
Epoch 6/200
162/162 [==============================] - 0s 93us/step - loss: 0.6270 - accuracy: 0.7037 - val_loss: 0.6180 - val_accuracy: 0.6667
Epoch 7/200
162/162 [==============================] - 0s 86us/step - loss: 0.6115 - accuracy: 0.7037 - val_loss: 0.6109 - val_accuracy: 0.6667
Epoch 8/200
162/162 [==============================] - 0s 86us/step - loss: 0.5949 - accuracy: 0.7099 - val_loss: 0.5968 - val_accuracy: 0.7037
Epoch 9/200
162/162 [==============================] - 0s 99us/step - loss: 0.5784 - accuracy: 0.7037 - val_loss: 0.5870 - val_accuracy: 0.7037
Epoch 10/200
162/162 [==============================] - 0s 111us/step - loss: 0.5631 - accuracy: 0.7099 - val_loss: 0.5827 - val_accuracy: 0.7037
Epoch 11/200
162/162 [==============================] - 0s 93us/step - loss: 0.5504 - accuracy: 0.7099 - val_loss: 0.5718 - val_accuracy: 0.6852
Epoch 12/200
162/162 [==============================] - 0s 93us/step - loss: 0.5415 - accuracy: 0.7222 - val_loss: 0.5613 - val_accuracy: 0.6852
Epoch 13/200
162/162 [==============================] - 0s 86us/step - loss: 0.5291 - accuracy: 0.7284 - val_loss: 0.5476 - val_accuracy: 0.6852
Epoch 14/200
162/162 [==============================] - 0s 86us/step - loss: 0.5194 - accuracy: 0.7160 - val_loss: 0.5379 - val_accuracy: 0.7037
Epoch 15/200
162/162 [==============================] - 0s 80us/step - loss: 0.5190 - accuracy: 0.7346 - val_loss: 0.5277 - val_accuracy: 0.7222
Epoch 16/200
162/162 [==============================] - 0s 86us/step - loss: 0.5023 - accuracy: 0.7469 - val_loss: 0.5318 - val_accuracy: 0.7778
Epoch 17/200
162/162 [==============================] - 0s 86us/step - loss: 0.4821 - accuracy: 0.7840 - val_loss: 0.5357 - val_accuracy: 0.7963
Epoch 18/200
162/162 [==============================] - 0s 86us/step - loss: 0.4650 - accuracy: 0.7840 - val_loss: 0.5308 - val_accuracy: 0.7963
Epoch 19/200
162/162 [==============================] - 0s 86us/step - loss: 0.4609 - accuracy: 0.7901 - val_loss: 0.5470 - val_accuracy: 0.7222
Epoch 20/200
162/162 [==============================] - 0s 80us/step - loss: 0.4549 - accuracy: 0.7901 - val_loss: 0.5606 - val_accuracy: 0.7222
Epoch 21/200
162/162 [==============================] - 0s 93us/step - loss: 0.4465 - accuracy: 0.8025 - val_loss: 0.5559 - val_accuracy: 0.7222
Epoch 22/200
162/162 [==============================] - 0s 74us/step - loss: 0.4472 - accuracy: 0.7840 - val_loss: 0.5403 - val_accuracy: 0.7407
Epoch 23/200
162/162 [==============================] - 0s 74us/step - loss: 0.4687 - accuracy: 0.7654 - val_loss: 0.5305 - val_accuracy: 0.7778
Epoch 24/200
162/162 [==============================] - 0s 123us/step - loss: 0.4412 - accuracy: 0.7593 - val_loss: 0.5300 - val_accuracy: 0.7963
Epoch 25/200
162/162 [==============================] - 0s 105us/step - loss: 0.4162 - accuracy: 0.7901 - val_loss: 0.5529 - val_accuracy: 0.7222
Epoch 26/200
162/162 [==============================] - 0s 99us/step - loss: 0.4090 - accuracy: 0.8210 - val_loss: 0.5396 - val_accuracy: 0.7593
Epoch 27/200
162/162 [==============================] - 0s 86us/step - loss: 0.3954 - accuracy: 0.8333 - val_loss: 0.5259 - val_accuracy: 0.7407

Epoch 00027: ReduceLROnPlateau reducing learning rate to 0.003000000026077032.
Epoch 28/200
162/162 [==============================] - 0s 86us/step - loss: 0.3960 - accuracy: 0.8272 - val_loss: 0.5284 - val_accuracy: 0.7222
Epoch 29/200
162/162 [==============================] - 0s 93us/step - loss: 0.3958 - accuracy: 0.8272 - val_loss: 0.5243 - val_accuracy: 0.7593
Epoch 30/200
162/162 [==============================] - 0s 80us/step - loss: 0.3860 - accuracy: 0.8210 - val_loss: 0.5192 - val_accuracy: 0.7778
Epoch 31/200
162/162 [==============================] - 0s 99us/step - loss: 0.3801 - accuracy: 0.8210 - val_loss: 0.5134 - val_accuracy: 0.7778
Epoch 32/200
162/162 [==============================] - 0s 111us/step - loss: 0.3740 - accuracy: 0.8272 - val_loss: 0.5115 - val_accuracy: 0.7778
Epoch 33/200
162/162 [==============================] - 0s 123us/step - loss: 0.3666 - accuracy: 0.8333 - val_loss: 0.5086 - val_accuracy: 0.7963
Epoch 34/200
162/162 [==============================] - 0s 111us/step - loss: 0.3621 - accuracy: 0.8272 - val_loss: 0.5083 - val_accuracy: 0.7963
Epoch 35/200
162/162 [==============================] - 0s 105us/step - loss: 0.3571 - accuracy: 0.8272 - val_loss: 0.5057 - val_accuracy: 0.7963
Epoch 36/200
162/162 [==============================] - 0s 111us/step - loss: 0.3521 - accuracy: 0.8333 - val_loss: 0.5058 - val_accuracy: 0.7778
Epoch 37/200
162/162 [==============================] - 0s 111us/step - loss: 0.3466 - accuracy: 0.8333 - val_loss: 0.5061 - val_accuracy: 0.7963

Epoch 00037: ReduceLROnPlateau reducing learning rate to 0.001500000013038516.
Epoch 38/200
162/162 [==============================] - 0s 105us/step - loss: 0.3428 - accuracy: 0.8395 - val_loss: 0.5041 - val_accuracy: 0.7963
Epoch 39/200
162/162 [==============================] - 0s 111us/step - loss: 0.3407 - accuracy: 0.8457 - val_loss: 0.5080 - val_accuracy: 0.7778
Epoch 40/200
162/162 [==============================] - 0s 111us/step - loss: 0.3380 - accuracy: 0.8395 - val_loss: 0.5229 - val_accuracy: 0.7778
Epoch 41/200
162/162 [==============================] - 0s 117us/step - loss: 0.3401 - accuracy: 0.8457 - val_loss: 0.5307 - val_accuracy: 0.7778
Epoch 42/200
162/162 [==============================] - 0s 105us/step - loss: 0.3417 - accuracy: 0.8395 - val_loss: 0.5363 - val_accuracy: 0.7778
Epoch 43/200
162/162 [==============================] - 0s 117us/step - loss: 0.3384 - accuracy: 0.8395 - val_loss: 0.5282 - val_accuracy: 0.7778
Epoch 44/200
162/162 [==============================] - 0s 111us/step - loss: 0.3347 - accuracy: 0.8395 - val_loss: 0.5174 - val_accuracy: 0.7778
Epoch 45/200
162/162 [==============================] - 0s 117us/step - loss: 0.3302 - accuracy: 0.8395 - val_loss: 0.5105 - val_accuracy: 0.7778
Epoch 46/200
162/162 [==============================] - 0s 117us/step - loss: 0.3280 - accuracy: 0.8457 - val_loss: 0.5056 - val_accuracy: 0.7778
Epoch 47/200
162/162 [==============================] - ETA: 0s - loss: 0.2933 - accuracy: 0.87 - 0s 142us/step - loss: 0.3258 - accuracy: 0.8519 - val_loss: 0.5080 - val_accuracy: 0.7778

Epoch 00047: ReduceLROnPlateau reducing learning rate to 0.000750000006519258.
Epoch 48/200
162/162 [==============================] - 0s 123us/step - loss: 0.3239 - accuracy: 0.8642 - val_loss: 0.5072 - val_accuracy: 0.7778
Epoch 49/200
162/162 [==============================] - 0s 136us/step - loss: 0.3227 - accuracy: 0.8580 - val_loss: 0.5070 - val_accuracy: 0.7778
Epoch 50/200
162/162 [==============================] - 0s 117us/step - loss: 0.3219 - accuracy: 0.8580 - val_loss: 0.5088 - val_accuracy: 0.7778
Epoch 51/200
162/162 [==============================] - 0s 130us/step - loss: 0.3200 - accuracy: 0.8642 - val_loss: 0.5089 - val_accuracy: 0.7778
Epoch 52/200
162/162 [==============================] - 0s 105us/step - loss: 0.3190 - accuracy: 0.8580 - val_loss: 0.5070 - val_accuracy: 0.7778
Epoch 53/200
162/162 [==============================] - 0s 111us/step - loss: 0.3170 - accuracy: 0.8642 - val_loss: 0.5048 - val_accuracy: 0.7778
Epoch 54/200
162/162 [==============================] - ETA: 0s - loss: 0.4040 - accuracy: 0.81 - 0s 117us/step - loss: 0.3167 - accuracy: 0.8580 - val_loss: 0.5008 - val_accuracy: 0.7778
Epoch 55/200
162/162 [==============================] - 0s 111us/step - loss: 0.3155 - accuracy: 0.8642 - val_loss: 0.4981 - val_accuracy: 0.7778
Epoch 56/200
162/162 [==============================] - 0s 111us/step - loss: 0.3149 - accuracy: 0.8642 - val_loss: 0.4947 - val_accuracy: 0.7963
Epoch 57/200
162/162 [==============================] - 0s 111us/step - loss: 0.3135 - accuracy: 0.8642 - val_loss: 0.4910 - val_accuracy: 0.7963

Epoch 00057: ReduceLROnPlateau reducing learning rate to 0.000375000003259629.
Epoch 58/200
162/162 [==============================] - 0s 123us/step - loss: 0.3136 - accuracy: 0.8704 - val_loss: 0.4902 - val_accuracy: 0.7963
Epoch 59/200
162/162 [==============================] - 0s 142us/step - loss: 0.3132 - accuracy: 0.8704 - val_loss: 0.4900 - val_accuracy: 0.7963
Epoch 60/200
162/162 [==============================] - 0s 123us/step - loss: 0.3127 - accuracy: 0.8704 - val_loss: 0.4898 - val_accuracy: 0.7963
Epoch 61/200
162/162 [==============================] - 0s 111us/step - loss: 0.3121 - accuracy: 0.8704 - val_loss: 0.4903 - val_accuracy: 0.7963
Epoch 62/200
162/162 [==============================] - 0s 111us/step - loss: 0.3114 - accuracy: 0.8704 - val_loss: 0.4911 - val_accuracy: 0.7963
Epoch 63/200
162/162 [==============================] - 0s 111us/step - loss: 0.3105 - accuracy: 0.8704 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 64/200
162/162 [==============================] - 0s 123us/step - loss: 0.3088 - accuracy: 0.8704 - val_loss: 0.4946 - val_accuracy: 0.7963
Epoch 65/200
162/162 [==============================] - 0s 105us/step - loss: 0.3078 - accuracy: 0.8765 - val_loss: 0.4955 - val_accuracy: 0.7963
Epoch 66/200
162/162 [==============================] - 0s 105us/step - loss: 0.3076 - accuracy: 0.8765 - val_loss: 0.4960 - val_accuracy: 0.7963
Epoch 67/200
162/162 [==============================] - 0s 105us/step - loss: 0.3070 - accuracy: 0.8704 - val_loss: 0.4962 - val_accuracy: 0.7963

Epoch 00067: ReduceLROnPlateau reducing learning rate to 0.0001875000016298145.
Epoch 68/200
162/162 [==============================] - 0s 105us/step - loss: 0.3065 - accuracy: 0.8704 - val_loss: 0.4960 - val_accuracy: 0.7963
Epoch 69/200
162/162 [==============================] - 0s 117us/step - loss: 0.3063 - accuracy: 0.8704 - val_loss: 0.4959 - val_accuracy: 0.7963
Epoch 70/200
162/162 [==============================] - 0s 117us/step - loss: 0.3061 - accuracy: 0.8704 - val_loss: 0.4957 - val_accuracy: 0.7963
Epoch 71/200
162/162 [==============================] - 0s 111us/step - loss: 0.3060 - accuracy: 0.8765 - val_loss: 0.4944 - val_accuracy: 0.7963
Epoch 72/200
162/162 [==============================] - 0s 123us/step - loss: 0.3059 - accuracy: 0.8765 - val_loss: 0.4937 - val_accuracy: 0.7963
Epoch 73/200
162/162 [==============================] - 0s 111us/step - loss: 0.3058 - accuracy: 0.8827 - val_loss: 0.4936 - val_accuracy: 0.7963
Epoch 74/200
162/162 [==============================] - 0s 105us/step - loss: 0.3057 - accuracy: 0.8827 - val_loss: 0.4934 - val_accuracy: 0.7963
Epoch 75/200
162/162 [==============================] - 0s 111us/step - loss: 0.3055 - accuracy: 0.8827 - val_loss: 0.4933 - val_accuracy: 0.7963
Epoch 76/200
162/162 [==============================] - 0s 117us/step - loss: 0.3052 - accuracy: 0.8765 - val_loss: 0.4926 - val_accuracy: 0.7963
Epoch 77/200
162/162 [==============================] - 0s 111us/step - loss: 0.3053 - accuracy: 0.8765 - val_loss: 0.4912 - val_accuracy: 0.7963

Epoch 00077: ReduceLROnPlateau reducing learning rate to 9.375000081490725e-05.
Epoch 78/200
162/162 [==============================] - 0s 111us/step - loss: 0.3051 - accuracy: 0.8765 - val_loss: 0.4909 - val_accuracy: 0.7963
Epoch 79/200
162/162 [==============================] - 0s 111us/step - loss: 0.3051 - accuracy: 0.8765 - val_loss: 0.4908 - val_accuracy: 0.7963
Epoch 80/200
162/162 [==============================] - 0s 117us/step - loss: 0.3049 - accuracy: 0.8765 - val_loss: 0.4909 - val_accuracy: 0.7963
Epoch 81/200
162/162 [==============================] - 0s 142us/step - loss: 0.3048 - accuracy: 0.8765 - val_loss: 0.4909 - val_accuracy: 0.7963
Epoch 82/200
162/162 [==============================] - 0s 142us/step - loss: 0.3046 - accuracy: 0.8765 - val_loss: 0.4910 - val_accuracy: 0.7963
Epoch 83/200
162/162 [==============================] - 0s 111us/step - loss: 0.3045 - accuracy: 0.8765 - val_loss: 0.4913 - val_accuracy: 0.7963
Epoch 84/200
162/162 [==============================] - 0s 105us/step - loss: 0.3043 - accuracy: 0.8765 - val_loss: 0.4909 - val_accuracy: 0.7963
Epoch 85/200
162/162 [==============================] - 0s 111us/step - loss: 0.3041 - accuracy: 0.8765 - val_loss: 0.4907 - val_accuracy: 0.7963
Epoch 86/200
162/162 [==============================] - 0s 105us/step - loss: 0.3040 - accuracy: 0.8765 - val_loss: 0.4908 - val_accuracy: 0.7963
Epoch 87/200
162/162 [==============================] - 0s 105us/step - loss: 0.3038 - accuracy: 0.8765 - val_loss: 0.4909 - val_accuracy: 0.7963

Epoch 00087: ReduceLROnPlateau reducing learning rate to 4.6875000407453626e-05.
Epoch 88/200
162/162 [==============================] - 0s 111us/step - loss: 0.3037 - accuracy: 0.8765 - val_loss: 0.4910 - val_accuracy: 0.7963
Epoch 89/200
162/162 [==============================] - ETA: 0s - loss: 0.2736 - accuracy: 0.93 - 0s 111us/step - loss: 0.3036 - accuracy: 0.8765 - val_loss: 0.4911 - val_accuracy: 0.7963
Epoch 90/200
162/162 [==============================] - 0s 117us/step - loss: 0.3035 - accuracy: 0.8765 - val_loss: 0.4911 - val_accuracy: 0.7963
Epoch 91/200
162/162 [==============================] - 0s 111us/step - loss: 0.3034 - accuracy: 0.8765 - val_loss: 0.4912 - val_accuracy: 0.7963
Epoch 92/200
162/162 [==============================] - 0s 136us/step - loss: 0.3033 - accuracy: 0.8765 - val_loss: 0.4913 - val_accuracy: 0.7963
Epoch 93/200
162/162 [==============================] - 0s 117us/step - loss: 0.3033 - accuracy: 0.8765 - val_loss: 0.4915 - val_accuracy: 0.7963
Epoch 94/200
162/162 [==============================] - 0s 111us/step - loss: 0.3031 - accuracy: 0.8765 - val_loss: 0.4919 - val_accuracy: 0.7963
Epoch 95/200
162/162 [==============================] - 0s 117us/step - loss: 0.3030 - accuracy: 0.8765 - val_loss: 0.4921 - val_accuracy: 0.7963
Epoch 96/200
162/162 [==============================] - 0s 123us/step - loss: 0.3030 - accuracy: 0.8827 - val_loss: 0.4923 - val_accuracy: 0.7963
Epoch 97/200
162/162 [==============================] - 0s 148us/step - loss: 0.3029 - accuracy: 0.8827 - val_loss: 0.4925 - val_accuracy: 0.7963

Epoch 00097: ReduceLROnPlateau reducing learning rate to 2.3437500203726813e-05.
Epoch 98/200
162/162 [==============================] - 0s 130us/step - loss: 0.3027 - accuracy: 0.8827 - val_loss: 0.4926 - val_accuracy: 0.7963
Epoch 99/200
162/162 [==============================] - 0s 111us/step - loss: 0.3027 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 100/200
162/162 [==============================] - 0s 111us/step - loss: 0.3027 - accuracy: 0.8827 - val_loss: 0.4929 - val_accuracy: 0.7963
Epoch 101/200
162/162 [==============================] - 0s 117us/step - loss: 0.3026 - accuracy: 0.8827 - val_loss: 0.4930 - val_accuracy: 0.7963
Epoch 102/200
162/162 [==============================] - 0s 111us/step - loss: 0.3026 - accuracy: 0.8827 - val_loss: 0.4930 - val_accuracy: 0.7963
Epoch 103/200
162/162 [==============================] - 0s 111us/step - loss: 0.3025 - accuracy: 0.8827 - val_loss: 0.4930 - val_accuracy: 0.7963
Epoch 104/200
162/162 [==============================] - 0s 117us/step - loss: 0.3025 - accuracy: 0.8827 - val_loss: 0.4930 - val_accuracy: 0.7963
Epoch 105/200
162/162 [==============================] - 0s 111us/step - loss: 0.3025 - accuracy: 0.8827 - val_loss: 0.4930 - val_accuracy: 0.7963
Epoch 106/200
162/162 [==============================] - 0s 99us/step - loss: 0.3024 - accuracy: 0.8827 - val_loss: 0.4930 - val_accuracy: 0.7963
Epoch 107/200
162/162 [==============================] - 0s 111us/step - loss: 0.3024 - accuracy: 0.8827 - val_loss: 0.4930 - val_accuracy: 0.7963

Epoch 00107: ReduceLROnPlateau reducing learning rate to 1.1718750101863407e-05.
Epoch 108/200
162/162 [==============================] - 0s 111us/step - loss: 0.3024 - accuracy: 0.8827 - val_loss: 0.4930 - val_accuracy: 0.7963
Epoch 109/200
162/162 [==============================] - 0s 111us/step - loss: 0.3024 - accuracy: 0.8827 - val_loss: 0.4930 - val_accuracy: 0.7963
Epoch 110/200
162/162 [==============================] - 0s 117us/step - loss: 0.3023 - accuracy: 0.8827 - val_loss: 0.4930 - val_accuracy: 0.7963
Epoch 111/200
162/162 [==============================] - 0s 117us/step - loss: 0.3023 - accuracy: 0.8827 - val_loss: 0.4930 - val_accuracy: 0.7963
Epoch 112/200
162/162 [==============================] - 0s 117us/step - loss: 0.3023 - accuracy: 0.8827 - val_loss: 0.4930 - val_accuracy: 0.7963
Epoch 113/200
162/162 [==============================] - 0s 130us/step - loss: 0.3023 - accuracy: 0.8827 - val_loss: 0.4931 - val_accuracy: 0.7963
Epoch 114/200
162/162 [==============================] - 0s 136us/step - loss: 0.3023 - accuracy: 0.8827 - val_loss: 0.4930 - val_accuracy: 0.7963
Epoch 115/200
162/162 [==============================] - 0s 105us/step - loss: 0.3023 - accuracy: 0.8827 - val_loss: 0.4930 - val_accuracy: 0.7963
Epoch 116/200
162/162 [==============================] - 0s 117us/step - loss: 0.3023 - accuracy: 0.8827 - val_loss: 0.4930 - val_accuracy: 0.7963
Epoch 117/200
162/162 [==============================] - 0s 123us/step - loss: 0.3022 - accuracy: 0.8827 - val_loss: 0.4929 - val_accuracy: 0.7963

Epoch 00117: ReduceLROnPlateau reducing learning rate to 5.859375050931703e-06.
Epoch 118/200
162/162 [==============================] - 0s 148us/step - loss: 0.3022 - accuracy: 0.8827 - val_loss: 0.4929 - val_accuracy: 0.7963
Epoch 119/200
162/162 [==============================] - 0s 123us/step - loss: 0.3022 - accuracy: 0.8827 - val_loss: 0.4929 - val_accuracy: 0.7963
Epoch 120/200
162/162 [==============================] - 0s 117us/step - loss: 0.3022 - accuracy: 0.8827 - val_loss: 0.4929 - val_accuracy: 0.7963
Epoch 121/200
162/162 [==============================] - 0s 123us/step - loss: 0.3022 - accuracy: 0.8827 - val_loss: 0.4929 - val_accuracy: 0.7963
Epoch 122/200
162/162 [==============================] - 0s 123us/step - loss: 0.3022 - accuracy: 0.8827 - val_loss: 0.4929 - val_accuracy: 0.7963
Epoch 123/200
162/162 [==============================] - 0s 111us/step - loss: 0.3022 - accuracy: 0.8827 - val_loss: 0.4929 - val_accuracy: 0.7963
Epoch 124/200
162/162 [==============================] - 0s 111us/step - loss: 0.3022 - accuracy: 0.8827 - val_loss: 0.4929 - val_accuracy: 0.7963
Epoch 125/200
162/162 [==============================] - 0s 105us/step - loss: 0.3022 - accuracy: 0.8827 - val_loss: 0.4929 - val_accuracy: 0.7963
Epoch 126/200
162/162 [==============================] - 0s 117us/step - loss: 0.3022 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 127/200
162/162 [==============================] - 0s 111us/step - loss: 0.3022 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963

Epoch 00127: ReduceLROnPlateau reducing learning rate to 2.9296875254658516e-06.
Epoch 128/200
162/162 [==============================] - ETA: 0s - loss: 0.3428 - accuracy: 0.87 - 0s 117us/step - loss: 0.3022 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 129/200
162/162 [==============================] - 0s 105us/step - loss: 0.3022 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 130/200
162/162 [==============================] - 0s 105us/step - loss: 0.3022 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 131/200
162/162 [==============================] - 0s 111us/step - loss: 0.3022 - accuracy: 0.8827 - val_loss: 0.4929 - val_accuracy: 0.7963
Epoch 132/200
162/162 [==============================] - 0s 117us/step - loss: 0.3022 - accuracy: 0.8827 - val_loss: 0.4929 - val_accuracy: 0.7963
Epoch 133/200
162/162 [==============================] - 0s 117us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4929 - val_accuracy: 0.7963
Epoch 134/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4929 - val_accuracy: 0.7963
Epoch 135/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4929 - val_accuracy: 0.7963
Epoch 136/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4929 - val_accuracy: 0.7963
Epoch 137/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4929 - val_accuracy: 0.7963

Epoch 00137: ReduceLROnPlateau reducing learning rate to 1.4648437627329258e-06.
Epoch 138/200
162/162 [==============================] - 0s 136us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4929 - val_accuracy: 0.7963
Epoch 139/200
162/162 [==============================] - 0s 117us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4929 - val_accuracy: 0.7963
Epoch 140/200
162/162 [==============================] - 0s 105us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4929 - val_accuracy: 0.7963
Epoch 141/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4929 - val_accuracy: 0.7963
Epoch 142/200
162/162 [==============================] - 0s 123us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4929 - val_accuracy: 0.7963
Epoch 143/200
162/162 [==============================] - 0s 117us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4929 - val_accuracy: 0.7963
Epoch 144/200
162/162 [==============================] - 0s 117us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 145/200
162/162 [==============================] - 0s 117us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 146/200
162/162 [==============================] - 0s 105us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 147/200
162/162 [==============================] - 0s 105us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963

Epoch 00147: ReduceLROnPlateau reducing learning rate to 7.324218813664629e-07.
Epoch 148/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 149/200
162/162 [==============================] - 0s 117us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 150/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 151/200
162/162 [==============================] - 0s 117us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 152/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 153/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 154/200
162/162 [==============================] - 0s 105us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 155/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 156/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 157/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963

Epoch 00157: ReduceLROnPlateau reducing learning rate to 3.6621094068323146e-07.
Epoch 158/200
162/162 [==============================] - 0s 105us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 159/200
162/162 [==============================] - 0s 99us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 160/200
162/162 [==============================] - 0s 105us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 161/200
162/162 [==============================] - 0s 148us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 162/200
162/162 [==============================] - 0s 117us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 163/200
162/162 [==============================] - 0s 117us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 164/200
162/162 [==============================] - 0s 123us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 165/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 166/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 167/200
162/162 [==============================] - 0s 117us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963

Epoch 00167: ReduceLROnPlateau reducing learning rate to 1.8310547034161573e-07.
Epoch 168/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 169/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 170/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 171/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 172/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 173/200
162/162 [==============================] - 0s 105us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 174/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 175/200
162/162 [==============================] - 0s 105us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 176/200
162/162 [==============================] - 0s 105us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 177/200
162/162 [==============================] - 0s 105us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963

Epoch 00177: ReduceLROnPlateau reducing learning rate to 9.155273517080786e-08.
Epoch 178/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 179/200
162/162 [==============================] - 0s 117us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 180/200
162/162 [==============================] - ETA: 0s - loss: 0.3056 - accuracy: 0.81 - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 181/200
162/162 [==============================] - 0s 105us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 182/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 183/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 184/200
162/162 [==============================] - 0s 99us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 185/200
162/162 [==============================] - 0s 130us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 186/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 187/200
162/162 [==============================] - ETA: 0s - loss: 0.3155 - accuracy: 0.78 - 0s 142us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963

Epoch 00187: ReduceLROnPlateau reducing learning rate to 4.577636758540393e-08.
Epoch 188/200
162/162 [==============================] - 0s 136us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 189/200
162/162 [==============================] - 0s 130us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 190/200
162/162 [==============================] - 0s 123us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 191/200
162/162 [==============================] - 0s 123us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 192/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 193/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 194/200
162/162 [==============================] - 0s 117us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 195/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 196/200
162/162 [==============================] - 0s 117us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 197/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963

Epoch 00197: ReduceLROnPlateau reducing learning rate to 2.2888183792701966e-08.
Epoch 198/200
162/162 [==============================] - 0s 111us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 199/200
162/162 [==============================] - 0s 105us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
Epoch 200/200
162/162 [==============================] - 0s 105us/step - loss: 0.3021 - accuracy: 0.8827 - val_loss: 0.4928 - val_accuracy: 0.7963
In [357]:
# Plot training-history curves recorded by Keras `model.fit` (the `history`
# object from the training cell above).
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# One x-axis point per recorded epoch.
epochs = range(len(acc))

# Accuracy: dots for training, solid line for validation.
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

# Loss on a separate figure.
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
range(0, 200)
In [358]:
# Evaluate the trained model on the held-out test split and report both metrics.
evaluation = model.evaluate(X_test, y_test)
test_loss, test_acc = evaluation
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
54/54 [==============================] - 0s 56us/step
test loss: 0.49283186263508266, test accuracy: 0.7962962985038757
In [359]:
# ROC AUC is computed on the raw predicted probabilities (not thresholded labels).
y_pred = model.predict(X_test)
auc = roc_auc_score(y_test, y_pred)
print("AUC ROC: ", auc)
AUC ROC:  0.8210526315789474
In [360]:
# Threshold the predicted probabilities at 0.5 to get hard 0/1 labels,
# then report Cohen's kappa and the confusion matrix.
# Vectorized with NumPy instead of list(map(lambda ...)); ravel() flattens the
# (n, 1) prediction array so the result is the same flat list of ints.
# assumes y_pred holds per-sample probabilities in [0, 1] — from model.predict above
y_pred = (np.asarray(y_pred) >= 0.5).astype(int).ravel().tolist()
print("Kappa: ", cohen_kappa_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
Kappa:  0.547945205479452
[[30  5]
 [ 6 13]]

KMeans

In [546]:
X
Out[546]:
chromagramfiles_1 chromagramfiles_2 chromagramfiles_3 chromagramfiles_4 chromagramfiles_5 chromagramfiles_6 chromagramfiles_7 chromagramfiles_8 chromagramfiles_9 chromagramfiles_10 chromagramfiles_11 chromagramfiles_12
0 -0.592360 -0.453705 2.211117 -0.229901 0.645559 -0.393204 0.424372 -0.229283 -0.693774 1.147599 -0.220378 0.262481
1 -0.441523 1.666549 -0.417753 1.148299 1.047397 -0.025553 0.529401 -0.322715 0.788981 -0.702420 -0.209698 1.924655
2 -0.397413 1.044025 0.141778 1.742806 0.991984 0.907397 1.476307 0.831252 1.523918 0.670640 -0.046392 0.497913
3 -1.385913 -1.136470 -1.159142 -0.779634 -1.287956 -1.177352 -1.012282 0.555960 1.314559 0.253537 1.699829 -1.383790
4 -0.410804 -0.664148 -0.936463 -0.072578 -1.082182 -0.957161 -1.022927 -0.233777 0.259301 -0.358189 1.699829 -0.811341
5 0.654434 -0.480395 -0.564938 0.557637 -0.695855 -0.602536 -0.825163 -0.588546 1.523917 -0.823244 0.754639 -0.645843
6 -0.375055 -1.465338 1.649386 -1.257929 2.008574 -0.563748 1.506384 0.579723 -1.744781 -0.548854 -1.508425 -0.391068
7 0.064972 -0.292924 1.874810 0.363856 1.341844 0.251944 1.013951 1.655213 0.115402 1.527592 -0.325145 0.025517
8 0.457188 -1.280226 0.508305 0.536904 2.008575 0.193572 -0.498497 0.270656 -1.409592 -0.431942 -1.096868 -1.161518
9 0.264205 -0.451954 0.540019 -0.518915 0.759702 2.591054 -0.083006 0.304222 -0.558774 0.300192 -0.287566 -0.493630
10 1.615916 -0.476284 0.254007 -0.624451 0.097446 0.217831 -0.040382 1.768809 -0.319326 0.979400 0.016016 -0.153245
11 0.834285 -1.405123 0.704867 -0.689665 2.008575 1.203152 -1.002498 -0.667904 -1.688291 -1.156923 -0.737306 -0.879992
12 -0.368007 0.683133 -0.474222 0.547694 -0.820217 0.364380 -0.474588 -0.674021 1.523918 -0.793291 1.078068 -0.457193
13 -0.839289 0.306633 -0.853660 0.723116 0.435777 -0.879183 0.335639 0.593469 0.708144 -0.396821 -0.242968 1.924655
14 -0.319898 0.340082 -0.491559 0.701985 -0.842146 0.597909 -0.644831 -0.662251 1.523918 -0.575211 0.534899 -0.527162
15 -0.609565 0.412613 -0.695381 1.316178 -0.719769 0.865430 1.679917 -0.849334 1.523918 -0.735830 0.144609 0.242574
16 -0.294394 0.741619 0.151913 2.539991 0.416363 0.148867 1.677200 0.822220 1.003847 -0.526991 0.785121 -0.293878
17 1.116695 -0.420423 1.149167 0.301950 0.500878 1.773601 -0.060893 0.672677 0.289742 1.527593 1.283583 0.238430
18 0.842794 -0.575729 0.643169 -0.602296 -0.331996 1.529202 -0.867435 0.191979 -0.217921 0.807237 1.699829 0.130123
19 -0.030794 0.030288 0.462185 -0.769191 -0.543974 2.591054 -0.487337 -0.076347 -0.991742 0.900076 -0.242676 -1.415815
20 -1.324459 2.249029 -1.202104 -0.290955 -0.053323 -1.169169 -0.449254 -1.306597 -0.432854 -0.500100 -1.455507 0.028032
21 -1.391553 2.249029 -1.278978 -0.888575 -0.811278 -1.180328 0.198450 -1.316982 -0.927827 -1.028770 -1.496198 0.693363
22 -0.662886 0.307712 -0.805667 1.248429 -0.021261 -0.853836 0.505072 -0.109644 0.239978 -0.008774 -0.437828 1.924655
23 1.105419 -0.629615 0.090579 2.059265 -0.742273 0.287915 -0.889948 0.338598 0.769168 0.080426 1.699829 -0.385717
24 1.043642 -0.624779 0.129667 1.899266 -0.669239 0.492505 -0.813576 0.318395 0.576968 -0.083727 1.699829 -0.180141
25 0.301993 -0.705708 -0.070367 0.439151 -0.783839 1.504692 -0.883292 -0.324238 0.394206 -0.681481 1.699829 -0.468423
26 1.404180 -1.101276 -1.004741 0.000759 -1.064381 -0.992797 -0.971380 1.704361 0.968314 0.696274 1.699829 -0.198305
27 -0.634723 -0.430657 -0.787967 1.284818 -0.780406 -0.201832 0.438439 -0.741240 1.523918 -0.871410 -0.336990 -0.773167
28 0.317357 -0.327927 0.301217 -0.742175 0.030999 -0.632488 -0.114595 2.018795 -0.481880 0.192970 -0.259076 0.650325
29 -0.648337 -1.398422 0.413041 -1.220330 -0.442423 -1.123348 -0.408672 2.018795 -1.250844 -0.200036 -1.302509 -0.368660
... ... ... ... ... ... ... ... ... ... ... ... ...
186 0.393077 -0.250074 0.038107 -0.191832 -0.290416 1.007564 -0.703416 -0.310414 -0.798470 -0.796349 1.699829 -0.544372
187 -1.377643 -0.803877 2.211117 -0.707481 1.879500 0.084271 0.023163 -1.061854 -1.515997 0.877885 -1.504060 -1.062191
188 1.615917 0.379972 0.224721 -0.034053 1.116997 1.461618 -0.476350 1.089453 0.375577 -0.678774 0.197515 -0.407654
189 1.615917 -0.530983 -0.059684 -0.924859 0.297570 -0.788527 -0.863155 0.507705 -1.231423 0.671175 -1.167708 -1.068322
190 -0.315846 -1.045971 1.094836 -1.257540 -0.646452 -0.759760 -1.119000 1.113215 -1.727260 1.527592 -1.499848 -1.441971
191 -0.183414 -1.108627 0.780160 -1.125961 -0.797291 -0.594726 -1.084903 -0.937498 -1.226658 1.527592 -1.097546 -0.723262
192 1.615917 -1.079754 0.603712 -1.142855 -0.595133 -1.113955 -1.106489 0.175898 -1.304200 -0.527112 -1.252788 -0.999291
193 1.615917 -0.514731 1.852987 -0.738263 -0.091935 0.444710 -0.919840 0.338934 -0.676013 1.060982 -0.689200 -0.040139
194 -1.023096 -0.007046 -0.867797 -0.555651 -0.693210 -0.998105 0.001088 -0.891619 1.203890 -0.871836 -0.513026 1.924654
195 0.242106 -0.524011 -0.569132 -0.770429 1.699110 -0.415382 -0.903373 -0.100685 -0.974586 1.527592 -0.629775 -0.696446
196 -1.369321 -0.969464 2.211117 -1.188189 -0.580502 -1.060826 0.122236 -0.950228 -1.726779 0.105209 -1.501989 -0.426102
197 -1.015340 -0.530076 -0.283168 -1.082866 -0.835974 -1.147408 0.419459 -1.191474 -0.661554 1.527592 -0.997834 1.722317
198 -0.850883 -0.441126 0.243450 0.222028 1.249673 -0.347006 -0.107132 -0.257125 0.126194 1.527592 -0.422391 1.077974
199 1.615917 -0.957115 -0.818624 -0.751346 0.920566 0.942750 -0.885506 0.827017 -0.967859 0.218694 -0.831072 0.035852
200 1.615917 -1.094018 -1.161937 -0.817956 0.985373 0.188513 -0.904777 0.516967 -1.230226 1.160960 -0.827322 0.309158
201 1.615917 -1.168115 -1.198039 -0.967002 1.353491 1.155611 -0.998698 1.025620 -1.193487 -0.101131 -1.079050 0.318717
202 0.887584 -1.008954 1.434477 -0.648944 1.812023 -1.119854 0.217275 1.663147 -1.635939 -0.375805 -1.213970 1.924655
203 -0.281732 -0.334833 0.143734 -0.498620 0.200931 -0.330363 -0.167310 1.001306 -0.272888 0.327019 1.070418 1.924654
204 -0.135492 -0.180497 0.349882 -0.411950 0.523304 -0.164545 -0.419377 1.939169 -0.073775 0.044241 1.347349 1.924654
205 1.615917 -0.407161 1.545772 0.566414 0.881235 -0.072638 -0.448623 1.725723 -0.162715 1.468765 -0.660579 -0.516106
206 1.404839 -0.218548 0.899766 -0.292046 1.212597 1.247130 -0.134708 2.018795 -0.672111 0.036232 -0.857434 -0.755932
207 1.593455 -0.041945 1.395647 -0.303439 1.481579 0.972266 -0.240798 2.018795 -0.629725 -0.240328 -0.765081 -0.553387
208 1.395568 1.540836 -0.840747 0.193399 -0.891845 2.198621 -0.578460 -0.895624 -0.218762 -1.304397 1.699829 -0.937437
209 1.583225 1.673175 -1.151481 -0.093932 -1.043532 2.078341 -1.020401 -0.782673 -0.238115 -1.519520 1.699829 -1.128162
210 1.370331 1.252913 -1.149111 0.407119 -0.751369 1.134063 -0.818484 -1.134854 -0.210751 -1.317205 1.699829 -1.128242
211 1.041007 -1.290910 -1.008073 -1.187425 -0.642020 -0.253467 -1.115999 -1.223980 -1.338703 1.527592 -0.933271 -0.918089
212 1.615915 -1.334768 -0.029491 -1.257929 -0.839407 -1.180328 -1.119000 0.430783 -1.413857 -1.452312 -1.464581 -1.270209
213 -0.744293 0.244034 -1.095374 -0.843102 -0.676799 -1.167238 0.315258 -1.196862 -0.503734 0.774377 -0.704666 1.924655
214 0.406336 0.410463 -0.317774 0.569095 -0.738039 0.037857 -0.548951 -0.093707 -0.061980 -0.002270 1.699829 1.020268
215 -1.011813 -0.264498 -0.827835 -0.483839 1.047147 -0.994911 -0.261502 -0.762674 0.219646 0.586379 -0.527753 1.924654

216 rows × 12 columns

In [547]:
# Elbow method: record the within-cluster sum of squares (KMeans inertia)
# for every candidate cluster count k = 1..14.
WSSs = []
for k in range(1, 15):
    fitted = KMeans(n_clusters=k, random_state=0).fit(X)
    WSSs.append(fitted.inertia_)
WSSs
Out[547]:
[2592.0,
 2098.7293789346745,
 1732.4188990905318,
 1606.2630867887576,
 1500.2148167696203,
 1423.6001104969187,
 1355.4724990652844,
 1283.649878481895,
 1213.5364716691665,
 1172.587492994494,
 1128.5861906886926,
 1087.5581685344223,
 1057.5011291947897,
 1021.2765793863816]
In [548]:
# Elbow curve: look for the "knee" where inertia stops dropping sharply.
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)
Out[548]:
[<matplotlib.lines.Line2D at 0x1b83176b470>]

Based on the elbow plot above, we choose K = 6 clusters.

In [549]:
# Final clustering with the K chosen from the elbow plot (K = 6).
kmeans_ch = KMeans(n_clusters=6, random_state=0, n_init=10)
kmeans_ch.fit(X)
Out[549]:
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
    n_clusters=6, n_init=10, n_jobs=1, precompute_distances='auto',
    random_state=0, tol=0.0001, verbose=0)
In [550]:
# Cluster label assigned to each row of X during fit.
kmeans_ch.labels_
Out[550]:
array([1, 3, 3, 0, 0, 0, 1, 1, 2, 2, 2, 2, 0, 3, 0, 0, 5, 5, 5, 2, 4, 4,
       3, 5, 5, 5, 5, 0, 2, 2, 2, 1, 4, 1, 0, 0, 0, 5, 5, 5, 4, 4, 4, 0,
       5, 0, 1, 1, 3, 3, 4, 3, 5, 5, 0, 5, 4, 4, 3, 5, 5, 5, 0, 3, 3, 3,
       1, 1, 1, 5, 5, 5, 1, 3, 0, 3, 5, 4, 4, 4, 3, 3, 4, 3, 0, 3, 4, 4,
       2, 2, 2, 2, 2, 4, 4, 2, 3, 0, 0, 0, 4, 5, 5, 5, 4, 3, 3, 0, 5, 5,
       5, 0, 0, 4, 2, 0, 2, 2, 4, 4, 4, 2, 5, 2, 0, 0, 0, 0, 5, 3, 0, 3,
       3, 1, 1, 1, 5, 2, 3, 2, 1, 1, 1, 4, 2, 0, 2, 2, 0, 0, 2, 4, 4, 2,
       1, 1, 2, 0, 4, 0, 0, 5, 3, 0, 1, 0, 3, 3, 3, 5, 5, 5, 1, 3, 3, 2,
       5, 0, 0, 2, 0, 1, 2, 2, 2, 0, 5, 1, 5, 2, 2, 2, 2, 2, 3, 2, 1, 3,
       3, 2, 2, 2, 1, 3, 3, 2, 2, 2, 0, 0, 0, 2, 2, 3, 5, 3])
In [551]:
# NOTE(review): for the data used in fit(), predict(X) returns the same
# assignments as kmeans_ch.labels_ — this recomputation is redundant.
clusters_ch = kmeans_ch.predict(X)
clusters_ch
Out[551]:
array([1, 3, 3, 0, 0, 0, 1, 1, 2, 2, 2, 2, 0, 3, 0, 0, 5, 5, 5, 2, 4, 4,
       3, 5, 5, 5, 5, 0, 2, 2, 2, 1, 4, 1, 0, 0, 0, 5, 5, 5, 4, 4, 4, 0,
       5, 0, 1, 1, 3, 3, 4, 3, 5, 5, 0, 5, 4, 4, 3, 5, 5, 5, 0, 3, 3, 3,
       1, 1, 1, 5, 5, 5, 1, 3, 0, 3, 5, 4, 4, 4, 3, 3, 4, 3, 0, 3, 4, 4,
       2, 2, 2, 2, 2, 4, 4, 2, 3, 0, 0, 0, 4, 5, 5, 5, 4, 3, 3, 0, 5, 5,
       5, 0, 0, 4, 2, 0, 2, 2, 4, 4, 4, 2, 5, 2, 0, 0, 0, 0, 5, 3, 0, 3,
       3, 1, 1, 1, 5, 2, 3, 2, 1, 1, 1, 4, 2, 0, 2, 2, 0, 0, 2, 4, 4, 2,
       1, 1, 2, 0, 4, 0, 0, 5, 3, 0, 1, 0, 3, 3, 3, 5, 5, 5, 1, 3, 3, 2,
       5, 0, 0, 2, 0, 1, 2, 2, 2, 0, 5, 1, 5, 2, 2, 2, 2, 2, 3, 2, 1, 3,
       3, 2, 2, 2, 1, 3, 3, 2, 2, 2, 0, 0, 0, 2, 2, 3, 5, 3])
In [552]:
# Attach the cluster id and the target label to X for the grouped plot below.
# NOTE(review): X was sliced from another frame earlier; mutating it with
# .loc may raise SettingWithCopyWarning — consider X = X.copy() first.
X.loc[:,'Cluster'] = clusters_ch
X.loc[:,'chosen'] = list(y)
In [553]:
X
Out[553]:
chromagramfiles_1 chromagramfiles_2 chromagramfiles_3 chromagramfiles_4 chromagramfiles_5 chromagramfiles_6 chromagramfiles_7 chromagramfiles_8 chromagramfiles_9 chromagramfiles_10 chromagramfiles_11 chromagramfiles_12 Cluster chosen
0 -0.592360 -0.453705 2.211117 -0.229901 0.645559 -0.393204 0.424372 -0.229283 -0.693774 1.147599 -0.220378 0.262481 1 0
1 -0.441523 1.666549 -0.417753 1.148299 1.047397 -0.025553 0.529401 -0.322715 0.788981 -0.702420 -0.209698 1.924655 3 0
2 -0.397413 1.044025 0.141778 1.742806 0.991984 0.907397 1.476307 0.831252 1.523918 0.670640 -0.046392 0.497913 3 0
3 -1.385913 -1.136470 -1.159142 -0.779634 -1.287956 -1.177352 -1.012282 0.555960 1.314559 0.253537 1.699829 -1.383790 0 0
4 -0.410804 -0.664148 -0.936463 -0.072578 -1.082182 -0.957161 -1.022927 -0.233777 0.259301 -0.358189 1.699829 -0.811341 0 0
5 0.654434 -0.480395 -0.564938 0.557637 -0.695855 -0.602536 -0.825163 -0.588546 1.523917 -0.823244 0.754639 -0.645843 0 0
6 -0.375055 -1.465338 1.649386 -1.257929 2.008574 -0.563748 1.506384 0.579723 -1.744781 -0.548854 -1.508425 -0.391068 1 0
7 0.064972 -0.292924 1.874810 0.363856 1.341844 0.251944 1.013951 1.655213 0.115402 1.527592 -0.325145 0.025517 1 0
8 0.457188 -1.280226 0.508305 0.536904 2.008575 0.193572 -0.498497 0.270656 -1.409592 -0.431942 -1.096868 -1.161518 2 0
9 0.264205 -0.451954 0.540019 -0.518915 0.759702 2.591054 -0.083006 0.304222 -0.558774 0.300192 -0.287566 -0.493630 2 0
10 1.615916 -0.476284 0.254007 -0.624451 0.097446 0.217831 -0.040382 1.768809 -0.319326 0.979400 0.016016 -0.153245 2 0
11 0.834285 -1.405123 0.704867 -0.689665 2.008575 1.203152 -1.002498 -0.667904 -1.688291 -1.156923 -0.737306 -0.879992 2 0
12 -0.368007 0.683133 -0.474222 0.547694 -0.820217 0.364380 -0.474588 -0.674021 1.523918 -0.793291 1.078068 -0.457193 0 0
13 -0.839289 0.306633 -0.853660 0.723116 0.435777 -0.879183 0.335639 0.593469 0.708144 -0.396821 -0.242968 1.924655 3 0
14 -0.319898 0.340082 -0.491559 0.701985 -0.842146 0.597909 -0.644831 -0.662251 1.523918 -0.575211 0.534899 -0.527162 0 0
15 -0.609565 0.412613 -0.695381 1.316178 -0.719769 0.865430 1.679917 -0.849334 1.523918 -0.735830 0.144609 0.242574 0 0
16 -0.294394 0.741619 0.151913 2.539991 0.416363 0.148867 1.677200 0.822220 1.003847 -0.526991 0.785121 -0.293878 5 0
17 1.116695 -0.420423 1.149167 0.301950 0.500878 1.773601 -0.060893 0.672677 0.289742 1.527593 1.283583 0.238430 5 0
18 0.842794 -0.575729 0.643169 -0.602296 -0.331996 1.529202 -0.867435 0.191979 -0.217921 0.807237 1.699829 0.130123 5 0
19 -0.030794 0.030288 0.462185 -0.769191 -0.543974 2.591054 -0.487337 -0.076347 -0.991742 0.900076 -0.242676 -1.415815 2 0
20 -1.324459 2.249029 -1.202104 -0.290955 -0.053323 -1.169169 -0.449254 -1.306597 -0.432854 -0.500100 -1.455507 0.028032 4 0
21 -1.391553 2.249029 -1.278978 -0.888575 -0.811278 -1.180328 0.198450 -1.316982 -0.927827 -1.028770 -1.496198 0.693363 4 0
22 -0.662886 0.307712 -0.805667 1.248429 -0.021261 -0.853836 0.505072 -0.109644 0.239978 -0.008774 -0.437828 1.924655 3 0
23 1.105419 -0.629615 0.090579 2.059265 -0.742273 0.287915 -0.889948 0.338598 0.769168 0.080426 1.699829 -0.385717 5 0
24 1.043642 -0.624779 0.129667 1.899266 -0.669239 0.492505 -0.813576 0.318395 0.576968 -0.083727 1.699829 -0.180141 5 0
25 0.301993 -0.705708 -0.070367 0.439151 -0.783839 1.504692 -0.883292 -0.324238 0.394206 -0.681481 1.699829 -0.468423 5 0
26 1.404180 -1.101276 -1.004741 0.000759 -1.064381 -0.992797 -0.971380 1.704361 0.968314 0.696274 1.699829 -0.198305 5 0
27 -0.634723 -0.430657 -0.787967 1.284818 -0.780406 -0.201832 0.438439 -0.741240 1.523918 -0.871410 -0.336990 -0.773167 0 0
28 0.317357 -0.327927 0.301217 -0.742175 0.030999 -0.632488 -0.114595 2.018795 -0.481880 0.192970 -0.259076 0.650325 2 0
29 -0.648337 -1.398422 0.413041 -1.220330 -0.442423 -1.123348 -0.408672 2.018795 -1.250844 -0.200036 -1.302509 -0.368660 2 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
186 0.393077 -0.250074 0.038107 -0.191832 -0.290416 1.007564 -0.703416 -0.310414 -0.798470 -0.796349 1.699829 -0.544372 5 1
187 -1.377643 -0.803877 2.211117 -0.707481 1.879500 0.084271 0.023163 -1.061854 -1.515997 0.877885 -1.504060 -1.062191 1 1
188 1.615917 0.379972 0.224721 -0.034053 1.116997 1.461618 -0.476350 1.089453 0.375577 -0.678774 0.197515 -0.407654 5 1
189 1.615917 -0.530983 -0.059684 -0.924859 0.297570 -0.788527 -0.863155 0.507705 -1.231423 0.671175 -1.167708 -1.068322 2 1
190 -0.315846 -1.045971 1.094836 -1.257540 -0.646452 -0.759760 -1.119000 1.113215 -1.727260 1.527592 -1.499848 -1.441971 2 1
191 -0.183414 -1.108627 0.780160 -1.125961 -0.797291 -0.594726 -1.084903 -0.937498 -1.226658 1.527592 -1.097546 -0.723262 2 1
192 1.615917 -1.079754 0.603712 -1.142855 -0.595133 -1.113955 -1.106489 0.175898 -1.304200 -0.527112 -1.252788 -0.999291 2 1
193 1.615917 -0.514731 1.852987 -0.738263 -0.091935 0.444710 -0.919840 0.338934 -0.676013 1.060982 -0.689200 -0.040139 2 1
194 -1.023096 -0.007046 -0.867797 -0.555651 -0.693210 -0.998105 0.001088 -0.891619 1.203890 -0.871836 -0.513026 1.924654 3 1
195 0.242106 -0.524011 -0.569132 -0.770429 1.699110 -0.415382 -0.903373 -0.100685 -0.974586 1.527592 -0.629775 -0.696446 2 1
196 -1.369321 -0.969464 2.211117 -1.188189 -0.580502 -1.060826 0.122236 -0.950228 -1.726779 0.105209 -1.501989 -0.426102 1 1
197 -1.015340 -0.530076 -0.283168 -1.082866 -0.835974 -1.147408 0.419459 -1.191474 -0.661554 1.527592 -0.997834 1.722317 3 1
198 -0.850883 -0.441126 0.243450 0.222028 1.249673 -0.347006 -0.107132 -0.257125 0.126194 1.527592 -0.422391 1.077974 3 1
199 1.615917 -0.957115 -0.818624 -0.751346 0.920566 0.942750 -0.885506 0.827017 -0.967859 0.218694 -0.831072 0.035852 2 1
200 1.615917 -1.094018 -1.161937 -0.817956 0.985373 0.188513 -0.904777 0.516967 -1.230226 1.160960 -0.827322 0.309158 2 1
201 1.615917 -1.168115 -1.198039 -0.967002 1.353491 1.155611 -0.998698 1.025620 -1.193487 -0.101131 -1.079050 0.318717 2 1
202 0.887584 -1.008954 1.434477 -0.648944 1.812023 -1.119854 0.217275 1.663147 -1.635939 -0.375805 -1.213970 1.924655 1 1
203 -0.281732 -0.334833 0.143734 -0.498620 0.200931 -0.330363 -0.167310 1.001306 -0.272888 0.327019 1.070418 1.924654 3 1
204 -0.135492 -0.180497 0.349882 -0.411950 0.523304 -0.164545 -0.419377 1.939169 -0.073775 0.044241 1.347349 1.924654 3 1
205 1.615917 -0.407161 1.545772 0.566414 0.881235 -0.072638 -0.448623 1.725723 -0.162715 1.468765 -0.660579 -0.516106 2 1
206 1.404839 -0.218548 0.899766 -0.292046 1.212597 1.247130 -0.134708 2.018795 -0.672111 0.036232 -0.857434 -0.755932 2 1
207 1.593455 -0.041945 1.395647 -0.303439 1.481579 0.972266 -0.240798 2.018795 -0.629725 -0.240328 -0.765081 -0.553387 2 1
208 1.395568 1.540836 -0.840747 0.193399 -0.891845 2.198621 -0.578460 -0.895624 -0.218762 -1.304397 1.699829 -0.937437 0 1
209 1.583225 1.673175 -1.151481 -0.093932 -1.043532 2.078341 -1.020401 -0.782673 -0.238115 -1.519520 1.699829 -1.128162 0 1
210 1.370331 1.252913 -1.149111 0.407119 -0.751369 1.134063 -0.818484 -1.134854 -0.210751 -1.317205 1.699829 -1.128242 0 1
211 1.041007 -1.290910 -1.008073 -1.187425 -0.642020 -0.253467 -1.115999 -1.223980 -1.338703 1.527592 -0.933271 -0.918089 2 1
212 1.615915 -1.334768 -0.029491 -1.257929 -0.839407 -1.180328 -1.119000 0.430783 -1.413857 -1.452312 -1.464581 -1.270209 2 1
213 -0.744293 0.244034 -1.095374 -0.843102 -0.676799 -1.167238 0.315258 -1.196862 -0.503734 0.774377 -0.704666 1.924655 3 1
214 0.406336 0.410463 -0.317774 0.569095 -0.738039 0.037857 -0.548951 -0.093707 -0.061980 -0.002270 1.699829 1.020268 5 1
215 -1.011813 -0.264498 -0.827835 -0.483839 1.047147 -0.994911 -0.261502 -0.762674 0.219646 0.586379 -0.527753 1.924654 3 1

216 rows × 14 columns

In [554]:
# Count tracks per (chosen, cluster) pair, then compare cluster composition
# between chosen (1) and not-chosen (0) tracks as a stacked bar chart.
counts = X.groupby(['chosen', 'Cluster']).size().reset_index()
pivot_df = counts.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:, [0, 1]].plot.bar(stacked=True, figsize=(10, 7))
Out[554]:
<matplotlib.axes._subplots.AxesSubplot at 0x1b8317bc588>

Random Forest

Arte Francés

In [555]:
# Features: standardized numeric columns from position 8 up to (not
# including) the last; target is the binary 'chosen' flag.
X = df_n_ps_std[0].iloc[:,8:-1]
y = df_n_ps[0]['chosen']
# Seed the split so the reported metrics are reproducible — the
# np.random.seed(1234) in the next cell runs AFTER this split and so
# did not cover it.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1234)
In [556]:
from sklearn.ensemble import RandomForestClassifier

np.random.seed(1234)
# oob_score=True is required for the rforest.oob_score_ lookup two cells
# below, which currently raises AttributeError (see the traceback there).
rforest = RandomForestClassifier(oob_score=True)
rforest.fit(X_train, y_train)
y_pred = rforest.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
print("Exactitud: ", accuracy_score(y_test, y_pred))
print("Kappa    : ", cohen_kappa_score(y_test, y_pred))
cm
C:\ProgramData\Anaconda3\lib\site-packages\sklearn\ensemble\weight_boosting.py:29: DeprecationWarning: numpy.core.umath_tests is an internal NumPy module and should not be imported. It will be removed in a future NumPy release.
  from numpy.core.umath_tests import inner1d
Exactitud:  0.7848101265822784
Kappa    :  0.3458353628835851
Out[556]:
array([[55,  2],
       [15,  7]], dtype=int64)
In [557]:
# Predicted class (0/1) for each test-set track.
y_pred
Out[557]:
array([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
       0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0], dtype=int64)
In [558]:
# Out-of-bag accuracy estimate. Only present when the forest was built
# with oob_score=True — the AttributeError below shows it was not.
rforest.oob_score_
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-558-69c072cb9288> in <module>
----> 1 rforest.oob_score_

AttributeError: 'RandomForestClassifier' object has no attribute 'oob_score_'
In [ ]:
# Feature names ordered from most to least important.
X.columns[np.argsort(-rforest.feature_importances_)]
In [ ]:
# NOTE(review): leftover scratch cell — just the negated importances used
# for the argsort above; consider deleting.
-rforest.feature_importances_
In [ ]:
# Bar chart of feature importances, sorted descending, with the feature
# names on the x-axis.
order = np.argsort(-rforest.feature_importances_)
variables = list(X.columns[order])
plt.figure(figsize=(16, 8))
plt.title("Importancia de las variables")
plt.bar(range(X.shape[1]), rforest.feature_importances_[order])
plt.xticks(range(X.shape[1]), variables, rotation=90)
plt.show()

ANN

In [ ]:
# NOTE(review): every other company section assigns df_n_ps_std_ch[i]
# here; df_n_ps_[0] looks like a typo — confirm the intended source frame.
X = df_n_ps_[0]
In [ ]:
y = df_n_ps[0]['chosen']
In [ ]:
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [ ]:
X_train.shape
In [ ]:
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [ ]:
# Hyper-parameter grids for the MLP search (3 * 11 * 11 * 11 = 3993
# combinations; batch sizes are defined but excluded from the grid below).
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [ ]:
import time
start = time.time()  # current time in seconds since the epoch (Jan 1, 1970)

np.random.seed(1234)
# Search space; batch_size is intentionally left out of the grid.
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track both kappa and accuracy, but refit the best model on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# NOTE(review): the iid parameter was deprecated in scikit-learn 0.22 and
# removed in 0.24 — this cell breaks on newer versions.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [ ]:
# Run the full grid search, then report the best configuration with its
# accuracy and kappa (messages are printed in Spanish).
grid.fit(X_train, y_train)

print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time()  # time after the model search finished
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
In [ ]:
n0 = X_train.shape[1]  # number of input features
# Layer widths taken from the best grid-search configuration, plus a
# single output unit appended at the end.
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)
lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [ ]:
input_tensor = Input(shape = (n0,))
In [ ]:
# Functional-API wiring: each Dense hidden layer consumes the previous
# layer's output tensor; ns[-1] (== 1) is the sigmoid classification unit.
hidden_outputs = [input_tensor]
for i in range (len(ns)-1):
    hidden_outputs.append(Dense(ns[i], activation = grid.best_params_['activation'])(hidden_outputs[i]))
    
classification_output = Dense(ns[-1], activation = 'sigmoid')(hidden_outputs[-1])
In [ ]:
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [ ]:
model.summary()
In [ ]:
# Reset to the freshly-initialized weights captured above, then train.
model.set_weights(weights)
adam = keras.optimizers.Adam(lr=lr)  # NOTE(review): `lr` is deprecated in newer Keras; prefer learning_rate=
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Halve the learning rate when validation accuracy plateaus for 10 epochs.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), 
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
In [ ]:
# Training curves: accuracy and loss per epoch, train vs. validation.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# Use a fresh name instead of reassigning the global `epochs` (the
# grid-searched max_iter) to a range object; the stray debug
# print(epochs) is also removed.
epoch_range = range(len(acc))

plt.plot(epoch_range, acc, 'bo', label='Training acc')
plt.plot(epoch_range, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.show()

plt.plot(epoch_range, loss, 'bo', label='Training loss')
plt.plot(epoch_range, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
In [ ]:
test_loss, test_acc = model.evaluate(X_test, y_test)
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
In [ ]:
y_pred = model.predict(X_test)
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
In [ ]:
# Binarize the sigmoid outputs at 0.5, then report kappa and the
# confusion matrix against the test labels.
y_pred = [int(p >= 0.5) for p in y_pred]
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))

KMeans

In [ ]:
X
In [ ]:
WSSs = []
for i in range(1,15) :
    km = KMeans(n_clusters=i, random_state=0)
    km.fit(X)
    WSSs.append(km.inertia_)
WSSs
In [ ]:
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)

Based on the elbow plot above, we choose K = 3 clusters.

In [ ]:
kmeans_ch = KMeans(n_clusters=3, random_state=0, n_init=10)
kmeans_ch.fit(X)
In [ ]:
kmeans_ch.labels_
In [ ]:
clusters_ch = kmeans_ch.predict(X)
clusters_ch
In [ ]:
X.loc[:,'Cluster'] = clusters_ch
X.loc[:,'chosen'] = list(y)
In [ ]:
X
In [ ]:
stacked = X.groupby(['chosen','Cluster']).size().reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(10,7))
In [ ]:
from IPython.display import display, Markdown, Latex
display(Markdown('## '+companies[1]))

ANN

In [ ]:
X = df_n_ps_std_ch[1]
In [ ]:
y = df_n_ps[1]['chosen']
In [ ]:
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [ ]:
X_train.shape
In [ ]:
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [ ]:
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [ ]:
import time
start = time.time() # Devuelve el tiempo actual en segundos desde el 1o de enero de 1970 (punto de referencia)

np.random.seed(1234)
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [ ]:
grid.fit(X_train, y_train)

print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # Tiempo después de finalizar el entrenamiento del modelo
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
In [ ]:
n0=X_train.shape[1]
### hidden_layer_sizes
ns = []
for i in range (len(grid.best_params_['hidden_layer_sizes'])):
    ns.append(grid.best_params_['hidden_layer_sizes'][i])

ns.append(1)
lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [ ]:
input_tensor = Input(shape = (n0,))
In [ ]:
hidden_outputs = [input_tensor]
for i in range (len(ns)-1):
    hidden_outputs.append(Dense(ns[i], activation = grid.best_params_['activation'])(hidden_outputs[i]))
    
classification_output = Dense(ns[-1], activation = 'sigmoid')(hidden_outputs[-1])
In [ ]:
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [ ]:
model.summary()
In [ ]:
model.set_weights(weights)
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), 
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
In [ ]:
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

print(epochs)

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
In [ ]:
test_loss, test_acc = model.evaluate(X_test, y_test)
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
In [ ]:
y_pred = model.predict(X_test)
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
In [ ]:
y_pred = list(map(lambda i: int(i>=0.5), y_pred))
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))

KMeans

In [ ]:
X
In [ ]:
WSSs = []
for i in range(1,15) :
    km = KMeans(n_clusters=i, random_state=0)
    km.fit(X)
    WSSs.append(km.inertia_)
WSSs
In [ ]:
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)

Based on the elbow plot above, we choose K = 2 clusters.

In [ ]:
kmeans_ch = KMeans(n_clusters=2, random_state=0, n_init=10)
kmeans_ch.fit(X)
In [ ]:
kmeans_ch.labels_
In [ ]:
clusters_ch = kmeans_ch.predict(X)
clusters_ch
In [ ]:
X.loc[:,'Cluster'] = clusters_ch
X.loc[:,'chosen'] = list(y)
In [ ]:
X
In [ ]:
stacked = X.groupby(['chosen','Cluster']).size().reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(10,7))
In [ ]:
from IPython.display import display, Markdown, Latex
display(Markdown('## '+companies[2]))

ANN

In [ ]:
X = df_n_ps_std_ch[2]
In [ ]:
y = df_n_ps[2]['chosen']
In [ ]:
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [ ]:
X_train.shape
In [ ]:
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [ ]:
# Hyper-parameter grids for the MLP search.
# FIX: restored 'tanh' as the third activation — every sibling section
# uses ['logistic', 'relu', 'tanh']; the original referenced
# grid.best_params_['activation'] here, leaking state from the PREVIOUS
# company's finished search (and potentially duplicating a grid value).
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
# batch_size_vec is defined but excluded from the grid below.
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [ ]:
import time
start = time.time() # Devuelve el tiempo actual en segundos desde el 1o de enero de 1970 (punto de referencia)

np.random.seed(1234)
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [ ]:
grid.fit(X_train, y_train)

print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # Tiempo después de finalizar el entrenamiento del modelo
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
In [ ]:
n0=X_train.shape[1]
### hidden_layer_sizes
ns = []
for i in range (len(grid.best_params_['hidden_layer_sizes'])):
    ns.append(grid.best_params_['hidden_layer_sizes'][i])

ns.append(1)
lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [ ]:
input_tensor = Input(shape = (n0,))
In [ ]:
hidden_outputs = [input_tensor]
for i in range (len(ns)-1):
    hidden_outputs.append(Dense(ns[i], activation = grid.best_params_['activation'])(hidden_outputs[i]))
    
classification_output = Dense(ns[-1], activation = 'sigmoid')(hidden_outputs[-1])
In [ ]:
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [ ]:
model.summary()
In [ ]:
model.set_weights(weights)
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), 
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
In [ ]:
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

print(epochs)

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
In [ ]:
test_loss, test_acc = model.evaluate(X_test, y_test)
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
In [ ]:
y_pred = model.predict(X_test)
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
In [ ]:
y_pred = list(map(lambda i: int(i>=0.5), y_pred))
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))

KMeans

In [ ]:
X
In [ ]:
WSSs = []
for i in range(1,15) :
    km = KMeans(n_clusters=i, random_state=0)
    km.fit(X)
    WSSs.append(km.inertia_)
WSSs
In [ ]:
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)

K=2

In [ ]:
kmeans_ch = KMeans(n_clusters=2, random_state=0, n_init=10)
kmeans_ch.fit(X)
In [ ]:
kmeans_ch.labels_
In [ ]:
clusters_ch = kmeans_ch.predict(X)
clusters_ch
In [ ]:
X.loc[:,'Cluster'] = clusters_ch
X.loc[:,'chosen'] = list(y)
In [ ]:
X
In [ ]:
stacked = X.groupby(['chosen','Cluster']).size().reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(10,7))
In [ ]:
from IPython.display import display, Markdown, Latex
display(Markdown('## '+companies[3]))

ANN

In [ ]:
X = df_n_ps_std_ch[3]
In [ ]:
y = df_n_ps[3]['chosen']
In [ ]:
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [ ]:
X_train.shape
In [ ]:
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [ ]:
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [ ]:
import time
start = time.time() # Devuelve el tiempo actual en segundos desde el 1o de enero de 1970 (punto de referencia)

np.random.seed(1234)
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1, iid=True)
In [ ]:
grid.fit(X_train, y_train)

print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # Tiempo después de finalizar el entrenamiento del modelo
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
In [ ]:
n0=X_train.shape[1]
### hidden_layer_sizes
ns = []
for i in range (len(grid.best_params_['hidden_layer_sizes'])):
    ns.append(grid.best_params_['hidden_layer_sizes'][i])

ns.append(1)
lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [ ]:
input_tensor = Input(shape = (n0,))
In [ ]:
hidden_outputs = [input_tensor]
for i in range (len(ns)-1):
    hidden_outputs.append(Dense(ns[i], activation = grid.best_params_['activation'])(hidden_outputs[i]))
    
classification_output = Dense(ns[-1], activation = 'sigmoid')(hidden_outputs[-1])
In [ ]:
model = Model([input_tensor], [classification_output])
weights = model.get_weights()
In [ ]:
model.summary()
In [ ]:
model.set_weights(weights)
adam = keras.optimizers.Adam(lr=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test),
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
In [ ]:
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

print(epochs)

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.show()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
In [ ]:
test_loss, test_acc = model.evaluate(X_test, y_test)
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
In [ ]:
y_pred = model.predict(X_test)
print("AUC ROC: ",roc_auc_score(y_test, y_pred))
In [ ]:
y_pred = list(map(lambda i: int(i>=0.5), y_pred))
print("Kappa: ",cohen_kappa_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))

KMeans

In [ ]:
X
In [ ]:
WSSs = []
for i in range(1,15) :
    km = KMeans(n_clusters=i, random_state=0)
    km.fit(X)
    WSSs.append(km.inertia_)
WSSs
In [ ]:
plt.figure(figsize=(12,12))
plt.plot(range(1, 15), WSSs)

K=2

In [ ]:
kmeans_ch = KMeans(n_clusters=2, random_state=0, n_init=10)
kmeans_ch.fit(X)
In [ ]:
kmeans_ch.labels_
In [ ]:
clusters_ch = kmeans_ch.predict(X)
clusters_ch
In [ ]:
X.loc[:,'Cluster'] = clusters_ch
X.loc[:,'chosen'] = list(y)
In [ ]:
X
In [ ]:
stacked = X.groupby(['chosen','Cluster']).size().reset_index()
pivot_df = stacked.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(10,7))
In [ ]:
from IPython.display import display, Markdown, Latex
# Render a section header for the next company.
header = Markdown('## '+companies[4])
display(header)

ANN

In [ ]:
# Standardized feature matrix for company index 4.
X = df_n_ps_std_ch[4]
In [ ]:
# Binary target: whether the track was chosen for the playlist.
y = df_n_ps[4]['chosen']
In [ ]:
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [ ]:
# Sanity check on the training-set dimensions.
X_train.shape
In [ ]:
# Base estimator for the grid search; hidden_layer_sizes here is
# overridden by the grid defined below.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [ ]:
# Hyperparameter candidates for the grid search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
# Batch-size candidates are defined but left out of the grid below (runtime).
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [ ]:
import time
start = time.time()  # wall-clock start, to report total tuning time at the end

np.random.seed(1234)
# Grid over the candidate hyperparameters (batch_size left out to bound runtime).
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track Cohen's kappa alongside accuracy; refit the best model on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# `iid` was deprecated in scikit-learn 0.22 and removed in 0.24, so it is
# dropped here; modern GridSearchCV always averages scores across folds.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1)
In [ ]:
# Run the exhaustive search; the best model is refit on the full training set.
grid.fit(X_train, y_train)

print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # time after the model search has finished
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
In [ ]:
# Mirror the best sklearn configuration for the Keras model built below.
n0 = X_train.shape[1]
# Hidden-layer widths from the best estimator, plus one sigmoid output unit.
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)
lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [ ]:
input_tensor = Input(shape = (n0,))
In [ ]:
# Stack the hidden layers, each one consuming the previous output tensor.
hidden_outputs = [input_tensor]
for width in ns[:-1]:
    layer = Dense(width, activation = grid.best_params_['activation'])
    hidden_outputs.append(layer(hidden_outputs[-1]))

# Single sigmoid unit for binary classification.
classification_output = Dense(ns[-1], activation = 'sigmoid')(hidden_outputs[-1])
In [ ]:
model = Model([input_tensor], [classification_output])
# Snapshot of the freshly initialized weights (restored before training below
# so every run starts from the same point).
weights = model.get_weights()
In [ ]:
model.summary()
In [ ]:
# Restore the initial weights so training always starts from the same point.
model.set_weights(weights)
# `lr` was deprecated and later removed from tf.keras optimizers;
# `learning_rate` is the supported keyword.
adam = keras.optimizers.Adam(learning_rate=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Halve the learning rate when validation accuracy plateaus for 10 epochs.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test),
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
In [ ]:
# Per-epoch metrics recorded by model.fit.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# Dedicated name for the x-axis values so we do not shadow the `epochs`
# hyperparameter taken from the grid search above.
epoch_range = range(len(acc))

# Report how many epochs actually ran (printing the bare range object
# only showed "range(0, N)").
print(len(acc))

plt.plot(epoch_range, acc, 'bo', label='Training acc')
plt.plot(epoch_range, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('epoch')
plt.legend()
plt.show()

plt.plot(epoch_range, loss, 'bo', label='Training loss')
plt.plot(epoch_range, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('epoch')
plt.legend()
plt.show()
In [ ]:
# Final held-out evaluation of the trained network.
evaluation = model.evaluate(X_test, y_test)
test_loss, test_acc = evaluation
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
In [ ]:
# Predicted probabilities on the test set; ROC AUC is threshold-independent.
y_pred = model.predict(X_test)
auc = roc_auc_score(y_test, y_pred)
print("AUC ROC: ", auc)
In [ ]:
# Binarize the predicted probabilities at the usual 0.5 threshold,
# then report agreement (kappa) and the confusion matrix.
y_pred = [int(p >= 0.5) for p in y_pred]
print("Kappa: ", cohen_kappa_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))

KMeans

In [ ]:
X
In [ ]:
# Elbow method: within-cluster sum of squares (inertia) for k = 1..14.
WSSs = []
for i in range(1, 15):
    # n_init=10 matches the final model below and pins the historical
    # sklearn default, which changed in recent releases.
    km = KMeans(n_clusters=i, random_state=0, n_init=10)
    km.fit(X)
    WSSs.append(km.inertia_)
WSSs
In [ ]:
# Elbow plot: inertia vs. number of clusters.
ks = range(1, 15)
plt.figure(figsize=(12, 12))
plt.plot(ks, WSSs)

Based on the elbow curve above, we choose K=2 clusters.

In [ ]:
# Final clustering with the k chosen from the elbow plot (fit returns self).
kmeans_ch = KMeans(n_clusters=2, random_state=0, n_init=10).fit(X)
In [ ]:
kmeans_ch.labels_
In [ ]:
# The model was fitted on X itself, so the assignments are already stored
# in labels_ -- no need to recompute them with predict(X).
clusters_ch = kmeans_ch.labels_
clusters_ch
In [ ]:
# NOTE(review): this mutates X in place; on a notebook re-run the elbow and
# KMeans cells above would then see the extra 'Cluster'/'chosen' columns as
# features. Consider writing to a copy instead.
X.loc[:,'Cluster'] = clusters_ch
X.loc[:,'chosen'] = list(y)
In [ ]:
# Inspect the frame with the appended cluster/label columns.
X
In [ ]:
# Count samples per (chosen, cluster) cell and draw one stacked bar per cluster.
counts = X.groupby(['chosen','Cluster']).size().reset_index()
pivot_df = counts.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(10,7))
In [ ]:
from IPython.display import display, Markdown, Latex
# Render a section header for the next company.
header = Markdown('## '+companies[5])
display(header)

ANN

In [ ]:
# Standardized feature matrix for company index 5.
X = df_n_ps_std_ch[5]
In [ ]:
# Binary target: whether the track was chosen for the playlist.
y = df_n_ps[5]['chosen']
In [ ]:
X_train, X_test, y_train, y_test = train_test_split(X, y)
In [ ]:
# Sanity check on the training-set dimensions.
X_train.shape
In [ ]:
# Base estimator for the grid search; hidden_layer_sizes here is
# overridden by the grid defined below.
mlp = MLPClassifier(hidden_layer_sizes=(30,30,30))
In [ ]:
# Hyperparameter candidates for the grid search.
activation_vec = ['logistic', 'relu', 'tanh']
max_iter_vec = [10, 20, 50, 75, 100, 200, 300, 400, 500, 1000, 2000]
hidden_layer_sizes_vec = [(10,), (20,), (30,), (10, 10), (20, 20), (30, 30), (20, 10), 
                          (10, 10, 10), (20, 20, 20), (30, 30, 30), (30, 20, 10)]
learning_rate_init_vec = [0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.02]
# Batch-size candidates are defined but left out of the grid below (runtime).
batch_size_vec = [10, 20, 40, 60, 80, 100, 150]
In [ ]:
import time
start = time.time()  # wall-clock start, to report total tuning time at the end

np.random.seed(1234)
# Grid over the candidate hyperparameters (batch_size left out to bound runtime).
parametros = {'activation': activation_vec,
              'max_iter':max_iter_vec,
              'hidden_layer_sizes': hidden_layer_sizes_vec,
              'learning_rate_init': learning_rate_init_vec#,
              #'batch_size': batch_size_vec
              }
# Track Cohen's kappa alongside accuracy; refit the best model on accuracy.
scoring = {'kappa':make_scorer(cohen_kappa_score), 'accuracy':'accuracy'}
# `iid` was deprecated in scikit-learn 0.22 and removed in 0.24, so it is
# dropped here; modern GridSearchCV always averages scores across folds.
grid = GridSearchCV(mlp, param_grid=parametros, cv=5, scoring=scoring, refit='accuracy', n_jobs=-1)
In [ ]:
# Run the exhaustive search; the best model is refit on the full training set.
grid.fit(X_train, y_train)

print("Los parámetros del mejor modelo fueron {0}, que permiten obtener un Accuracy de {1:.2f}% y un Kappa del {2:.2f}".format(
    grid.best_params_, grid.best_score_*100, grid.cv_results_['mean_test_kappa'][grid.best_index_]*100))
end = time.time() # time after the model search has finished
print("Tiempo total: {0:.2f} minutos".format((end-start)/60))
In [ ]:
# Mirror the best sklearn configuration for the Keras model built below.
n0 = X_train.shape[1]
# Hidden-layer widths from the best estimator, plus one sigmoid output unit.
ns = list(grid.best_params_['hidden_layer_sizes'])
ns.append(1)
lr = grid.best_params_['learning_rate_init']
epochs = grid.best_params_['max_iter']
In [ ]:
input_tensor = Input(shape = (n0,))
In [ ]:
# Stack the hidden layers, each one consuming the previous output tensor.
hidden_outputs = [input_tensor]
for width in ns[:-1]:
    layer = Dense(width, activation = grid.best_params_['activation'])
    hidden_outputs.append(layer(hidden_outputs[-1]))

# Single sigmoid unit for binary classification.
classification_output = Dense(ns[-1], activation = 'sigmoid')(hidden_outputs[-1])
In [ ]:
model = Model([input_tensor], [classification_output])
# Snapshot of the freshly initialized weights (restored before training below
# so every run starts from the same point).
weights = model.get_weights()
In [ ]:
model.summary()
In [ ]:
# Restore the initial weights so training always starts from the same point.
model.set_weights(weights)
# `lr` was deprecated and later removed from tf.keras optimizers;
# `learning_rate` is the supported keyword.
adam = keras.optimizers.Adam(learning_rate=lr)
model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])
# Halve the learning rate when validation accuracy plateaus for 10 epochs.
history = model.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test),
            callbacks=[
                keras.callbacks.ReduceLROnPlateau(
                    monitor='val_accuracy', factor=0.5, patience=10, min_delta=0.01, verbose=1
                )
            ]
         )
In [ ]:
# Per-epoch metrics recorded by model.fit.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# Dedicated name for the x-axis values so we do not shadow the `epochs`
# hyperparameter taken from the grid search above.
epoch_range = range(len(acc))

# Report how many epochs actually ran (printing the bare range object
# only showed "range(0, N)").
print(len(acc))

plt.plot(epoch_range, acc, 'bo', label='Training acc')
plt.plot(epoch_range, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('epoch')
plt.legend()
plt.show()

plt.plot(epoch_range, loss, 'bo', label='Training loss')
plt.plot(epoch_range, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('epoch')
plt.legend()
plt.show()
In [ ]:
# Final held-out evaluation of the trained network.
evaluation = model.evaluate(X_test, y_test)
test_loss, test_acc = evaluation
print("test loss: {}, test accuracy: {}".format(test_loss, test_acc))
In [ ]:
# Predicted probabilities on the test set; ROC AUC is threshold-independent.
y_pred = model.predict(X_test)
auc = roc_auc_score(y_test, y_pred)
print("AUC ROC: ", auc)
In [ ]:
# Binarize the predicted probabilities at the usual 0.5 threshold,
# then report agreement (kappa) and the confusion matrix.
y_pred = [int(p >= 0.5) for p in y_pred]
print("Kappa: ", cohen_kappa_score(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))

KMeans

In [ ]:
X
In [ ]:
# Elbow method: within-cluster sum of squares (inertia) for k = 1..14.
WSSs = []
for i in range(1, 15):
    # n_init=10 matches the final model below and pins the historical
    # sklearn default, which changed in recent releases.
    km = KMeans(n_clusters=i, random_state=0, n_init=10)
    km.fit(X)
    WSSs.append(km.inertia_)
WSSs
In [ ]:
# Elbow plot: inertia vs. number of clusters.
ks = range(1, 15)
plt.figure(figsize=(12, 12))
plt.plot(ks, WSSs)

Based on the elbow curve above, we choose K=6 clusters.

In [ ]:
# Final clustering with the k chosen from the elbow plot (fit returns self).
kmeans_ch = KMeans(n_clusters=6, random_state=0, n_init=10).fit(X)
In [ ]:
kmeans_ch.labels_
In [ ]:
# The model was fitted on X itself, so the assignments are already stored
# in labels_ -- no need to recompute them with predict(X).
clusters_ch = kmeans_ch.labels_
clusters_ch
In [ ]:
# NOTE(review): this mutates X in place; on a notebook re-run the elbow and
# KMeans cells above would then see the extra 'Cluster'/'chosen' columns as
# features. Consider writing to a copy instead.
X.loc[:,'Cluster'] = clusters_ch
X.loc[:,'chosen'] = list(y)
In [ ]:
# Inspect the frame with the appended cluster/label columns.
X
In [ ]:
# Count samples per (chosen, cluster) cell and draw one stacked bar per cluster.
counts = X.groupby(['chosen','Cluster']).size().reset_index()
pivot_df = counts.pivot(index='Cluster', columns='chosen', values=0)
pivot_df.loc[:,[0,1]].plot.bar(stacked=True, figsize=(10,7))
In [ ]:
 
In [ ]:
 
In [ ]: